review.fuel-infra Code Review - packages/trusty/rabbitmq-server.git/commitdiff
RabbitMQ 3.5.4 for MOS 7.0
author      Aleksandr Mogylchenko <amogylchenko@mirantis.com>
            Thu, 23 Jul 2015 05:44:10 +0000 (08:44 +0300)
committer   Aleksandr Mogylchenko <amogylchenko@mirantis.com>
            Thu, 6 Aug 2015 20:40:14 +0000 (23:40 +0300)
Source picked up from Debian Sid:
http://http.debian.net/debian/pool/main/r/rabbitmq-server/rabbitmq-server_3.5.4-1.dsc
http://http.debian.net/debian/pool/main/r/rabbitmq-server/rabbitmq-server_3.5.4.orig.tar.gz
http://http.debian.net/debian/pool/main/r/rabbitmq-server/rabbitmq-server_3.5.4-1.debian.tar.xz
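The three URLs above are the standard components of a Debian source package (.dsc, upstream tarball, Debian packaging tarball). As a rough sketch of how they can be fetched and unpacked locally (not part of this change; assumes wget and dpkg-dev are available):

    wget http://http.debian.net/debian/pool/main/r/rabbitmq-server/rabbitmq-server_3.5.4-1.dsc
    wget http://http.debian.net/debian/pool/main/r/rabbitmq-server/rabbitmq-server_3.5.4.orig.tar.gz
    wget http://http.debian.net/debian/pool/main/r/rabbitmq-server/rabbitmq-server_3.5.4-1.debian.tar.xz
    # dpkg-source verifies the checksums listed in the .dsc, unpacks the
    # upstream tarball and applies the debian/ packaging on top of it
    dpkg-source -x rabbitmq-server_3.5.4-1.dsc

This yields a rabbitmq-server-3.5.4/ source tree containing the debian/ packaging directory referenced in the file list below.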

Closes-Bug: #1463000
Change-Id: Ie1c58830295f6feb5d45e97a42129ac3f8d821e9

529 files changed:
debian/LICENSE.head [deleted file]
debian/LICENSE.tail [deleted file]
debian/README [deleted file]
debian/changelog
debian/compat
debian/control
debian/copyright
debian/dirs [deleted file]
debian/gbp.conf [new file with mode: 0644]
debian/ocf/rabbitmq-server [moved from debian/rabbitmq-server.ocf with 100% similarity, mode: 0755]
debian/rabbitmq-env.conf [new file with mode: 0644]
debian/rabbitmq-script-wrapper [changed mode: 0644->0755]
debian/rabbitmq-server-wait [new file with mode: 0755]
debian/rabbitmq-server.default
debian/rabbitmq-server.dirs [new file with mode: 0644]
debian/rabbitmq-server.install [new file with mode: 0644]
debian/rabbitmq-server.links [new file with mode: 0644]
debian/rabbitmq-server.postinst [moved from debian/postinst with 100% similarity]
debian/rabbitmq-server.postrm [moved from debian/postrm.in with 100% similarity]
debian/rabbitmq-server.service [new file with mode: 0644]
debian/rules [changed mode: 0644->0755]
debian/watch
rabbitmq-server/LICENSE
rabbitmq-server/Makefile
rabbitmq-server/README
rabbitmq-server/codegen.py
rabbitmq-server/codegen/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/docs/rabbitmq-plugins.1.xml
rabbitmq-server/docs/rabbitmq-service.xml
rabbitmq-server/docs/rabbitmq.config.example
rabbitmq-server/docs/rabbitmqctl.1.xml
rabbitmq-server/ebin/rabbit_app.in
rabbitmq-server/include/gm_specs.hrl
rabbitmq-server/include/rabbit.hrl
rabbitmq-server/include/rabbit_cli.hrl [new file with mode: 0644]
rabbitmq-server/include/rabbit_msg_store.hrl
rabbitmq-server/plugins-src/Makefile
rabbitmq-server/plugins-src/README
rabbitmq-server/plugins-src/cowboy-wrapper/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/cowboy-wrapper/cowboy-git/src/cowboy_http_req.erl.orig [deleted file]
rabbitmq-server/plugins-src/do-package.mk
rabbitmq-server/plugins-src/eldap-wrapper/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/mochiweb-wrapper/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/mochiweb-wrapper/mochiweb-git/src/mochiweb_request.erl.orig [deleted file]
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/README.md
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/codegen.py
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/src/rabbit_amqp1_0_binary_generator.erl
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/src/rabbit_amqp1_0_framing.erl
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/src/rabbit_amqp1_0_incoming_link.erl
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/src/rabbit_amqp1_0_outgoing_link.erl
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/src/rabbit_amqp1_0_reader.erl
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/src/rabbit_amqp1_0_session_process.erl
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/test/swiftmq/Makefile
rabbitmq-server/plugins-src/rabbitmq-amqp1.0/test/swiftmq/test/com/rabbitmq/amqp1_0/tests/swiftmq/SwiftMQTests.java
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/example/setup.sh
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/package.mk
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/src/rabbit_auth_backend_ldap.erl
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/src/rabbit_auth_backend_ldap_app.erl
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/src/rabbit_auth_backend_ldap_util.erl
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/src/rabbitmq_auth_backend_ldap.app.src
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/test/src/rabbit_auth_backend_ldap_test.erl
rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/test/src/rabbit_auth_backend_ldap_unit_test.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-auth-mechanism-ssl/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-auth-mechanism-ssl/src/rabbit_auth_mechanism_ssl.erl
rabbitmq-server/plugins-src/rabbitmq-auth-mechanism-ssl/src/rabbit_auth_mechanism_ssl_app.erl
rabbitmq-server/plugins-src/rabbitmq-consistent-hash-exchange/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-consistent-hash-exchange/src/rabbit_exchange_type_consistent_hash.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-erlang-client/Makefile
rabbitmq-server/plugins-src/rabbitmq-erlang-client/Makefile.in
rabbitmq-server/plugins-src/rabbitmq-erlang-client/common.mk
rabbitmq-server/plugins-src/rabbitmq-erlang-client/ebin/amqp_client.app.in
rabbitmq-server/plugins-src/rabbitmq-erlang-client/include/amqp_client.hrl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/include/amqp_client_internal.hrl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/include/amqp_gen_consumer_spec.hrl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/include/rabbit_routing_prefixes.hrl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/rabbit_common.app.in
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_auth_mechanisms.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_channel.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_channel_sup.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_channel_sup_sup.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_channels_manager.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_client.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_connection.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_connection_sup.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_connection_type_sup.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_direct_connection.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_direct_consumer.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_gen_connection.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_gen_consumer.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_main_reader.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_network_connection.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_rpc_client.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_rpc_server.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_selective_consumer.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_sup.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/amqp_uri.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/src/rabbit_routing_util.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/test.mk
rabbitmq-server/plugins-src/rabbitmq-erlang-client/test/Makefile
rabbitmq-server/plugins-src/rabbitmq-erlang-client/test/amqp_client_SUITE.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/test/amqp_dbg.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/test/negative_test_util.erl
rabbitmq-server/plugins-src/rabbitmq-erlang-client/test/test_util.erl
rabbitmq-server/plugins-src/rabbitmq-federation-management/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-federation-management/priv/www/js/federation.js
rabbitmq-server/plugins-src/rabbitmq-federation-management/priv/www/js/tmpl/federation-upstream.ejs
rabbitmq-server/plugins-src/rabbitmq-federation-management/priv/www/js/tmpl/federation.ejs
rabbitmq-server/plugins-src/rabbitmq-federation-management/src/rabbit_federation_mgmt.erl
rabbitmq-server/plugins-src/rabbitmq-federation/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-federation/include/rabbit_federation.hrl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_app.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_db.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_event.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_exchange.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_exchange_link.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_exchange_link_sup_sup.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_link_sup.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_link_util.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_parameters.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_queue.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_queue_link.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_queue_link_sup_sup.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_status.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_sup.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_upstream.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_upstream_exchange.erl
rabbitmq-server/plugins-src/rabbitmq-federation/src/rabbit_federation_util.erl
rabbitmq-server/plugins-src/rabbitmq-federation/test/src/rabbit_federation_exchange_test.erl
rabbitmq-server/plugins-src/rabbitmq-federation/test/src/rabbit_federation_queue_test.erl
rabbitmq-server/plugins-src/rabbitmq-federation/test/src/rabbit_federation_test_util.erl
rabbitmq-server/plugins-src/rabbitmq-federation/test/src/rabbit_federation_unit_test.erl
rabbitmq-server/plugins-src/rabbitmq-management-agent/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management-agent/src/rabbit_mgmt_agent_app.erl
rabbitmq-server/plugins-src/rabbitmq-management-agent/src/rabbit_mgmt_agent_sup.erl
rabbitmq-server/plugins-src/rabbitmq-management-agent/src/rabbit_mgmt_db_handler.erl
rabbitmq-server/plugins-src/rabbitmq-management-agent/src/rabbit_mgmt_external_stats.erl
rabbitmq-server/plugins-src/rabbitmq-management-agent/src/rabbitmq_management_agent.app.src
rabbitmq-server/plugins-src/rabbitmq-management-visualiser/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management-visualiser/priv/www/visualiser/js/glMatrix-min.js
rabbitmq-server/plugins-src/rabbitmq-management/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/LICENSE-MPL-RabbitMQ
rabbitmq-server/plugins-src/rabbitmq-management/bin/rabbitmqadmin
rabbitmq-server/plugins-src/rabbitmq-management/etc/rabbit-test.config
rabbitmq-server/plugins-src/rabbitmq-management/include/rabbit_mgmt.hrl
rabbitmq-server/plugins-src/rabbitmq-management/package.mk
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/api/index.html
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/css/main.css
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/doc/stats.html
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/img/bg-binary.png [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/charts.js
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/dispatcher.js
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/formatters.js
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/global.js
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/help.js
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/main.js
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/prefs.js
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/binary.ejs [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/channel.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/channels-list.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/columns-options.ejs [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/connection.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/connections.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/exchange.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/exchanges.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/memory-bar.ejs [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/memory-table.ejs [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/memory.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/msg-detail-deliveries.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/msg-detail-publishes.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/node.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/overview.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/partition.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/paths.ejs [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/policies.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/policy.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/queue.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/queues.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/rate-options.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/users.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/vhost.ejs
rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/vhosts.ejs
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_app.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_db.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_dispatcher.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_extension.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_format.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_load_definitions.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_reset_handler.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_stats.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_sup.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_sup_sup.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_util.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_aliveness_test.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_binding.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_bindings.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_channel.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_channels.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_cluster_name.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_connection.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_connection_channels.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_connections.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_consumers.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_definitions.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_exchange.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_exchange_publish.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_exchanges.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_extensions.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_node.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_nodes.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_overview.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_parameter.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_parameters.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_permission.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_permissions.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_permissions_user.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_permissions_vhost.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_policies.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_policy.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_queue.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_queue_get.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_queue_purge.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_queues.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_user.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_users.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_vhost.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_vhosts.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_whoami.erl
rabbitmq-server/plugins-src/rabbitmq-management/src/rabbitmq_management.app.src
rabbitmq-server/plugins-src/rabbitmq-management/test/src/rabbit_mgmt_test_db.erl
rabbitmq-server/plugins-src/rabbitmq-management/test/src/rabbit_mgmt_test_http.erl
rabbitmq-server/plugins-src/rabbitmq-management/test/src/rabbit_mgmt_test_unit.erl
rabbitmq-server/plugins-src/rabbitmq-management/test/src/rabbitmqadmin-test-wrapper.sh [new file with mode: 0755]
rabbitmq-server/plugins-src/rabbitmq-management/test/src/rabbitmqadmin-test.py
rabbitmq-server/plugins-src/rabbitmq-mqtt/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/include/rabbit_mqtt.hrl
rabbitmq-server/plugins-src/rabbitmq-mqtt/include/rabbit_mqtt_frame.hrl
rabbitmq-server/plugins-src/rabbitmq-mqtt/package.mk
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt_collector.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt_connection_sup.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt_frame.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt_processor.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt_reader.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt_sup.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbit_mqtt_util.erl
rabbitmq-server/plugins-src/rabbitmq-mqtt/src/rabbitmq_mqtt.app.src
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/Makefile
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/build.properties [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/build.xml [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/lib/junit.jar [moved from rabbitmq-server/plugins-src/rabbitmq-mqtt/lib/junit.jar with 100% similarity]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/rabbit-test.sh [new file with mode: 0755]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/setup-rabbit-test.sh [new file with mode: 0755]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/MqttTest.java
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/rabbit-test.sh [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/setup-rabbit-test.sh [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/tls/MutualAuth.java [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/test.config [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-mqtt/test/test.sh
rabbitmq-server/plugins-src/rabbitmq-shovel-management/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-shovel-management/priv/www/js/shovel.js
rabbitmq-server/plugins-src/rabbitmq-shovel-management/priv/www/js/tmpl/dynamic-shovel.ejs
rabbitmq-server/plugins-src/rabbitmq-shovel-management/src/rabbit_shovel_mgmt.erl
rabbitmq-server/plugins-src/rabbitmq-shovel-management/test/src/rabbit_shovel_mgmt_test_all.erl
rabbitmq-server/plugins-src/rabbitmq-shovel-management/test/src/rabbit_shovel_mgmt_test_http.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-shovel/include/rabbit_shovel.hrl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_config.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_dyn_worker_sup.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_parameters.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_status.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_sup.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_util.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_worker.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_worker_sup.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/test/src/rabbit_shovel_test.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/test/src/rabbit_shovel_test_all.erl
rabbitmq-server/plugins-src/rabbitmq-shovel/test/src/rabbit_shovel_test_dyn.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-stomp/README.md
rabbitmq-server/plugins-src/rabbitmq-stomp/deps/stomppy/Makefile
rabbitmq-server/plugins-src/rabbitmq-stomp/deps/stomppy/rabbit.patch [deleted file]
rabbitmq-server/plugins-src/rabbitmq-stomp/include/rabbit_stomp.hrl
rabbitmq-server/plugins-src/rabbitmq-stomp/include/rabbit_stomp_frame.hrl
rabbitmq-server/plugins-src/rabbitmq-stomp/include/rabbit_stomp_headers.hrl
rabbitmq-server/plugins-src/rabbitmq-stomp/package.mk
rabbitmq-server/plugins-src/rabbitmq-stomp/src/rabbit_stomp.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/src/rabbit_stomp_client_sup.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/src/rabbit_stomp_frame.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/src/rabbit_stomp_processor.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/src/rabbit_stomp_reader.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/src/rabbit_stomp_sup.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/src/rabbit_stomp_util.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/ack.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/base.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/connect_options.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/destinations.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/errors.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/lifecycle.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/non_ssl.config [deleted file]
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/parsing.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/rabbit_stomp_amqqueue_test.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/rabbit_stomp_publish_test.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/rabbit_stomp_test.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/rabbit_stomp_test_frame.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/rabbit_stomp_test_util.erl
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/reliability.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/ssl_lifecycle.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/test.config [moved from rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/ssl.config with 100% similarity]
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/test.py
rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/transactions.py
rabbitmq-server/plugins-src/rabbitmq-test/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-test/Makefile
rabbitmq-server/plugins-src/rabbitmq-test/README
rabbitmq-server/plugins-src/rabbitmq-test/package.mk
rabbitmq-server/plugins-src/rabbitmq-test/src/inet_proxy_dist.erl
rabbitmq-server/plugins-src/rabbitmq-test/src/inet_tcp_proxy.erl
rabbitmq-server/plugins-src/rabbitmq-test/src/inet_tcp_proxy_manager.erl
rabbitmq-server/plugins-src/rabbitmq-test/src/rabbit_test_configs.erl
rabbitmq-server/plugins-src/rabbitmq-test/src/rabbit_test_runner.erl
rabbitmq-server/plugins-src/rabbitmq-test/src/rabbit_test_util.erl
rabbitmq-server/plugins-src/rabbitmq-test/test/src/cluster_rename.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-test/test/src/clustering_management.erl
rabbitmq-server/plugins-src/rabbitmq-test/test/src/crashing_queues.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-test/test/src/dynamic_ha.erl
rabbitmq-server/plugins-src/rabbitmq-test/test/src/partitions.erl
rabbitmq-server/plugins-src/rabbitmq-test/test/src/rabbit_priority_queue_test.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-test/test/src/simple_ha.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-tracing/README
rabbitmq-server/plugins-src/rabbitmq-tracing/priv/www/js/tmpl/traces.ejs
rabbitmq-server/plugins-src/rabbitmq-tracing/priv/www/js/tracing.js
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_app.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_consumer.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_consumer_sup.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_files.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_mgmt.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_sup.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_traces.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_wm_file.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_wm_files.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_wm_trace.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/src/rabbit_tracing_wm_traces.erl
rabbitmq-server/plugins-src/rabbitmq-tracing/test/src/rabbit_tracing_test.erl
rabbitmq-server/plugins-src/rabbitmq-web-dispatch/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-web-dispatch/src/rabbit_web_dispatch_registry.erl
rabbitmq-server/plugins-src/rabbitmq-web-dispatch/src/rabbit_web_dispatch_sup.erl
rabbitmq-server/plugins-src/rabbitmq-web-dispatch/src/rabbit_webmachine.erl
rabbitmq-server/plugins-src/rabbitmq-web-dispatch/src/rabbit_webmachine_error_handler.erl [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/README.md
rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/priv/bunny.html
rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/priv/echo.html
rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/priv/sockjs-0.3.js [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/priv/temp-queue.html
rabbitmq-server/plugins-src/rabbitmq-web-stomp/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/rabbitmq-web-stomp/README.md
rabbitmq-server/plugins-src/rabbitmq-web-stomp/src/rabbit_ws_sockjs.erl
rabbitmq-server/plugins-src/rabbitmq-web-stomp/test/src/stomp.erl
rabbitmq-server/plugins-src/release.mk
rabbitmq-server/plugins-src/sockjs-erlang-wrapper/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/plugins-src/webmachine-wrapper/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/scripts/rabbitmq-defaults
rabbitmq-server/scripts/rabbitmq-defaults.bat [new file with mode: 0755]
rabbitmq-server/scripts/rabbitmq-echopid.bat
rabbitmq-server/scripts/rabbitmq-env
rabbitmq-server/scripts/rabbitmq-env.bat [new file with mode: 0755]
rabbitmq-server/scripts/rabbitmq-plugins
rabbitmq-server/scripts/rabbitmq-plugins.bat
rabbitmq-server/scripts/rabbitmq-server
rabbitmq-server/scripts/rabbitmq-server.bat
rabbitmq-server/scripts/rabbitmq-service.bat
rabbitmq-server/scripts/rabbitmqctl
rabbitmq-server/scripts/rabbitmqctl.bat
rabbitmq-server/src/app_utils.erl
rabbitmq-server/src/background_gc.erl
rabbitmq-server/src/credit_flow.erl
rabbitmq-server/src/delegate.erl
rabbitmq-server/src/delegate_sup.erl
rabbitmq-server/src/dtree.erl
rabbitmq-server/src/file_handle_cache.erl
rabbitmq-server/src/file_handle_cache_stats.erl [new file with mode: 0644]
rabbitmq-server/src/gatherer.erl
rabbitmq-server/src/gen_server2.erl
rabbitmq-server/src/gm.erl
rabbitmq-server/src/gm_soak_test.erl [deleted file]
rabbitmq-server/src/gm_speed_test.erl [deleted file]
rabbitmq-server/src/gm_tests.erl [deleted file]
rabbitmq-server/src/lqueue.erl
rabbitmq-server/src/mirrored_supervisor.erl
rabbitmq-server/src/mirrored_supervisor_sups.erl [new file with mode: 0644]
rabbitmq-server/src/mirrored_supervisor_tests.erl [deleted file]
rabbitmq-server/src/mnesia_sync.erl
rabbitmq-server/src/pg_local.erl
rabbitmq-server/src/pmon.erl
rabbitmq-server/src/priority_queue.erl
rabbitmq-server/src/rabbit.erl
rabbitmq-server/src/rabbit_access_control.erl
rabbitmq-server/src/rabbit_alarm.erl
rabbitmq-server/src/rabbit_amqqueue.erl
rabbitmq-server/src/rabbit_amqqueue_process.erl
rabbitmq-server/src/rabbit_amqqueue_sup.erl
rabbitmq-server/src/rabbit_amqqueue_sup_sup.erl [new file with mode: 0644]
rabbitmq-server/src/rabbit_auth_backend_dummy.erl
rabbitmq-server/src/rabbit_auth_backend_internal.erl
rabbitmq-server/src/rabbit_auth_mechanism.erl
rabbitmq-server/src/rabbit_auth_mechanism_amqplain.erl
rabbitmq-server/src/rabbit_auth_mechanism_cr_demo.erl
rabbitmq-server/src/rabbit_auth_mechanism_plain.erl
rabbitmq-server/src/rabbit_authn_backend.erl [new file with mode: 0644]
rabbitmq-server/src/rabbit_authz_backend.erl [moved from rabbitmq-server/src/rabbit_auth_backend.erl with 54% similarity]
rabbitmq-server/src/rabbit_autoheal.erl
rabbitmq-server/src/rabbit_backing_queue.erl
rabbitmq-server/src/rabbit_backing_queue_qc.erl [deleted file]
rabbitmq-server/src/rabbit_basic.erl
rabbitmq-server/src/rabbit_binary_generator.erl
rabbitmq-server/src/rabbit_binary_parser.erl
rabbitmq-server/src/rabbit_binding.erl
rabbitmq-server/src/rabbit_channel.erl
rabbitmq-server/src/rabbit_channel_interceptor.erl
rabbitmq-server/src/rabbit_channel_sup.erl
rabbitmq-server/src/rabbit_channel_sup_sup.erl
rabbitmq-server/src/rabbit_cli.erl [new file with mode: 0644]
rabbitmq-server/src/rabbit_client_sup.erl
rabbitmq-server/src/rabbit_command_assembler.erl
rabbitmq-server/src/rabbit_connection_helper_sup.erl
rabbitmq-server/src/rabbit_connection_sup.erl
rabbitmq-server/src/rabbit_control_main.erl
rabbitmq-server/src/rabbit_dead_letter.erl
rabbitmq-server/src/rabbit_diagnostics.erl
rabbitmq-server/src/rabbit_direct.erl
rabbitmq-server/src/rabbit_disk_monitor.erl
rabbitmq-server/src/rabbit_epmd_monitor.erl [new file with mode: 0644]
rabbitmq-server/src/rabbit_error_logger.erl
rabbitmq-server/src/rabbit_error_logger_file_h.erl
rabbitmq-server/src/rabbit_event.erl
rabbitmq-server/src/rabbit_exchange.erl
rabbitmq-server/src/rabbit_exchange_decorator.erl
rabbitmq-server/src/rabbit_exchange_type.erl
rabbitmq-server/src/rabbit_exchange_type_direct.erl
rabbitmq-server/src/rabbit_exchange_type_fanout.erl
rabbitmq-server/src/rabbit_exchange_type_headers.erl
rabbitmq-server/src/rabbit_exchange_type_invalid.erl
rabbitmq-server/src/rabbit_exchange_type_topic.erl
rabbitmq-server/src/rabbit_file.erl
rabbitmq-server/src/rabbit_framing.erl
rabbitmq-server/src/rabbit_guid.erl
rabbitmq-server/src/rabbit_heartbeat.erl
rabbitmq-server/src/rabbit_limiter.erl
rabbitmq-server/src/rabbit_log.erl
rabbitmq-server/src/rabbit_memory_monitor.erl
rabbitmq-server/src/rabbit_mirror_queue_coordinator.erl
rabbitmq-server/src/rabbit_mirror_queue_master.erl
rabbitmq-server/src/rabbit_mirror_queue_misc.erl
rabbitmq-server/src/rabbit_mirror_queue_mode.erl
rabbitmq-server/src/rabbit_mirror_queue_mode_all.erl
rabbitmq-server/src/rabbit_mirror_queue_mode_exactly.erl
rabbitmq-server/src/rabbit_mirror_queue_mode_nodes.erl
rabbitmq-server/src/rabbit_mirror_queue_slave.erl
rabbitmq-server/src/rabbit_mirror_queue_slave_sup.erl [deleted file]
rabbitmq-server/src/rabbit_mirror_queue_sync.erl
rabbitmq-server/src/rabbit_misc.erl
rabbitmq-server/src/rabbit_mnesia.erl
rabbitmq-server/src/rabbit_mnesia_rename.erl [new file with mode: 0644]
rabbitmq-server/src/rabbit_msg_file.erl
rabbitmq-server/src/rabbit_msg_store.erl
rabbitmq-server/src/rabbit_msg_store_ets_index.erl
rabbitmq-server/src/rabbit_msg_store_gc.erl
rabbitmq-server/src/rabbit_msg_store_index.erl
rabbitmq-server/src/rabbit_net.erl
rabbitmq-server/src/rabbit_networking.erl
rabbitmq-server/src/rabbit_node_monitor.erl
rabbitmq-server/src/rabbit_nodes.erl
rabbitmq-server/src/rabbit_parameter_validation.erl
rabbitmq-server/src/rabbit_plugins.erl
rabbitmq-server/src/rabbit_plugins_main.erl
rabbitmq-server/src/rabbit_policies.erl
rabbitmq-server/src/rabbit_policy.erl
rabbitmq-server/src/rabbit_policy_validator.erl
rabbitmq-server/src/rabbit_prelaunch.erl
rabbitmq-server/src/rabbit_prequeue.erl [new file with mode: 0644]
rabbitmq-server/src/rabbit_priority_queue.erl [new file with mode: 0644]
rabbitmq-server/src/rabbit_queue_collector.erl
rabbitmq-server/src/rabbit_queue_consumers.erl
rabbitmq-server/src/rabbit_queue_decorator.erl
rabbitmq-server/src/rabbit_queue_index.erl
rabbitmq-server/src/rabbit_reader.erl
rabbitmq-server/src/rabbit_recovery_terms.erl
rabbitmq-server/src/rabbit_registry.erl
rabbitmq-server/src/rabbit_restartable_sup.erl
rabbitmq-server/src/rabbit_router.erl
rabbitmq-server/src/rabbit_runtime_parameter.erl
rabbitmq-server/src/rabbit_runtime_parameters.erl
rabbitmq-server/src/rabbit_runtime_parameters_test.erl [deleted file]
rabbitmq-server/src/rabbit_sasl_report_file_h.erl
rabbitmq-server/src/rabbit_ssl.erl
rabbitmq-server/src/rabbit_sup.erl
rabbitmq-server/src/rabbit_table.erl
rabbitmq-server/src/rabbit_tests.erl [deleted file]
rabbitmq-server/src/rabbit_tests_event_receiver.erl [deleted file]
rabbitmq-server/src/rabbit_trace.erl
rabbitmq-server/src/rabbit_types.erl
rabbitmq-server/src/rabbit_upgrade.erl
rabbitmq-server/src/rabbit_upgrade_functions.erl
rabbitmq-server/src/rabbit_variable_queue.erl
rabbitmq-server/src/rabbit_version.erl
rabbitmq-server/src/rabbit_vhost.erl
rabbitmq-server/src/rabbit_vm.erl
rabbitmq-server/src/rabbit_writer.erl
rabbitmq-server/src/supervised_lifecycle.erl
rabbitmq-server/src/supervisor2.erl
rabbitmq-server/src/supervisor2_tests.erl [deleted file]
rabbitmq-server/src/tcp_acceptor.erl
rabbitmq-server/src/tcp_acceptor_sup.erl
rabbitmq-server/src/tcp_listener.erl
rabbitmq-server/src/tcp_listener_sup.erl
rabbitmq-server/src/test_sup.erl [deleted file]
rabbitmq-server/src/truncate.erl
rabbitmq-server/src/vm_memory_monitor.erl
rabbitmq-server/src/vm_memory_monitor_tests.erl [deleted file]
rabbitmq-server/src/worker_pool.erl
rabbitmq-server/src/worker_pool_sup.erl
rabbitmq-server/src/worker_pool_worker.erl
rabbitmq-server/version.mk

diff --git a/debian/LICENSE.head b/debian/LICENSE.head
deleted file mode 100644
index 2b5a17e..0000000
--- a/debian/LICENSE.head
+++ /dev/null
@@ -1,5 +0,0 @@
-This package, the RabbitMQ server is licensed under the MPL.
-
-If you have any questions regarding licensing, please contact us at
-info@rabbitmq.com.
-
diff --git a/debian/LICENSE.tail b/debian/LICENSE.tail
deleted file mode 100644
index 7858a04..0000000
--- a/debian/LICENSE.tail
+++ /dev/null
@@ -1,516 +0,0 @@
-
-The MIT license is as follows:
-
-        "Permission is hereby granted, free of charge, to any person
-        obtaining a copy of this file (the Software), to deal in the
-        Software without restriction, including without limitation the
-        rights to use, copy, modify, merge, publish, distribute,
-        sublicense, and/or sell copies of the Software, and to permit
-        persons to whom the Software is furnished to do so, subject to
-        the following conditions:
-
-        The above copyright notice and this permission notice shall be
-        included in all copies or substantial portions of the Software.
-
-        THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-        EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-        OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-        NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-        HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-        WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-        FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-        OTHER DEALINGS IN THE SOFTWARE."
-
-
-The BSD 2-Clause license is as follows:
-
-        "Redistribution and use in source and binary forms, with or
-        without modification, are permitted provided that the
-        following conditions are met:
-
-        1. Redistributions of source code must retain the above
-        copyright notice, this list of conditions and the following
-        disclaimer.
-
-        2. Redistributions in binary form must reproduce the above
-        copyright notice, this list of conditions and the following
-        disclaimer in the documentation and/or other materials
-        provided with the distribution.
-
-        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-        CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
-        INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-        MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-        CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-        SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-        NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-        LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-        HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-        CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-        OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-        EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
-
-
-The rest of this package is licensed under the Mozilla Public License 1.1
-Authors and Copyright are as described below:
-
-     The Initial Developer of the Original Code is GoPivotal, Inc.
-     Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-
-
-                          MOZILLA PUBLIC LICENSE
-                                Version 1.1
-
-                              ---------------
-
-1. Definitions.
-
-     1.0.1. "Commercial Use" means distribution or otherwise making the
-     Covered Code available to a third party.
-
-     1.1. "Contributor" means each entity that creates or contributes to
-     the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original
-     Code, prior Modifications used by a Contributor, and the Modifications
-     made by that particular Contributor.
-
-     1.3. "Covered Code" means the Original Code or Modifications or the
-     combination of the Original Code and Modifications, in each case
-     including portions thereof.
-
-     1.4. "Electronic Distribution Mechanism" means a mechanism generally
-     accepted in the software development community for the electronic
-     transfer of data.
-
-     1.5. "Executable" means Covered Code in any form other than Source
-     Code.
-
-     1.6. "Initial Developer" means the individual or entity identified
-     as the Initial Developer in the Source Code notice required by Exhibit
-     A.
-
-     1.7. "Larger Work" means a work which combines Covered Code or
-     portions thereof with code not governed by the terms of this License.
-
-     1.8. "License" means this document.
-
-     1.8.1. "Licensable" means having the right to grant, to the maximum
-     extent possible, whether at the time of the initial grant or
-     subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means any addition to or deletion from the
-     substance or structure of either the Original Code or any previous
-     Modifications. When Covered Code is released as a series of files, a
-     Modification is:
-          A. Any addition to or deletion from the contents of a file
-          containing Original Code or previous Modifications.
-
-          B. Any new file that contains any part of the Original Code or
-          previous Modifications.
-
-     1.10. "Original Code" means Source Code of computer software code
-     which is described in the Source Code notice required by Exhibit A as
-     Original Code, and which, at the time of its release under this
-     License is not already Covered Code governed by this License.
-
-     1.10.1. "Patent Claims" means any patent claim(s), now owned or
-     hereafter acquired, including without limitation,  method, process,
-     and apparatus claims, in any patent Licensable by grantor.
-
-     1.11. "Source Code" means the preferred form of the Covered Code for
-     making modifications to it, including all modules it contains, plus
-     any associated interface definition files, scripts used to control
-     compilation and installation of an Executable, or source code
-     differential comparisons against either the Original Code or another
-     well known, available Covered Code of the Contributor's choice. The
-     Source Code can be in a compressed or archival form, provided the
-     appropriate decompression or de-archiving software is widely available
-     for no charge.
-
-     1.12. "You" (or "Your")  means an individual or a legal entity
-     exercising rights under, and complying with all of the terms of, this
-     License or a future version of this License issued under Section 6.1.
-     For legal entities, "You" includes any entity which controls, is
-     controlled by, or is under common control with You. For purposes of
-     this definition, "control" means (a) the power, direct or indirect,
-     to cause the direction or management of such entity, whether by
-     contract or otherwise, or (b) ownership of more than fifty percent
-     (50%) of the outstanding shares or beneficial ownership of such
-     entity.
-
-2. Source Code License.
-
-     2.1. The Initial Developer Grant.
-     The Initial Developer hereby grants You a world-wide, royalty-free,
-     non-exclusive license, subject to third party intellectual property
-     claims:
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Initial Developer to use, reproduce,
-          modify, display, perform, sublicense and distribute the Original
-          Code (or portions thereof) with or without Modifications, and/or
-          as part of a Larger Work; and
-
-          (b) under Patents Claims infringed by the making, using or
-          selling of Original Code, to make, have made, use, practice,
-          sell, and offer for sale, and/or otherwise dispose of the
-          Original Code (or portions thereof).
-
-          (c) the licenses granted in this Section 2.1(a) and (b) are
-          effective on the date Initial Developer first distributes
-          Original Code under the terms of this License.
-
-          (d) Notwithstanding Section 2.1(b) above, no patent license is
-          granted: 1) for code that You delete from the Original Code; 2)
-          separate from the Original Code;  or 3) for infringements caused
-          by: i) the modification of the Original Code or ii) the
-          combination of the Original Code with other software or devices.
-
-     2.2. Contributor Grant.
-     Subject to third party intellectual property claims, each Contributor
-     hereby grants You a world-wide, royalty-free, non-exclusive license
-
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Contributor, to use, reproduce, modify,
-          display, perform, sublicense and distribute the Modifications
-          created by such Contributor (or portions thereof) either on an
-          unmodified basis, with other Modifications, as Covered Code
-          and/or as part of a Larger Work; and
-
-          (b) under Patent Claims infringed by the making, using, or
-          selling of  Modifications made by that Contributor either alone
-          and/or in combination with its Contributor Version (or portions
-          of such combination), to make, use, sell, offer for sale, have
-          made, and/or otherwise dispose of: 1) Modifications made by that
-          Contributor (or portions thereof); and 2) the combination of
-          Modifications made by that Contributor with its Contributor
-          Version (or portions of such combination).
-
-          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
-          effective on the date Contributor first makes Commercial Use of
-          the Covered Code.
-
-          (d)    Notwithstanding Section 2.2(b) above, no patent license is
-          granted: 1) for any code that Contributor has deleted from the
-          Contributor Version; 2)  separate from the Contributor Version;
-          3)  for infringements caused by: i) third party modifications of
-          Contributor Version or ii)  the combination of Modifications made
-          by that Contributor with other software  (except as part of the
-          Contributor Version) or other devices; or 4) under Patent Claims
-          infringed by Covered Code in the absence of Modifications made by
-          that Contributor.
-
-3. Distribution Obligations.
-
-     3.1. Application of License.
-     The Modifications which You create or to which You contribute are
-     governed by the terms of this License, including without limitation
-     Section 2.2. The Source Code version of Covered Code may be
-     distributed only under the terms of this License or a future version
-     of this License released under Section 6.1, and You must include a
-     copy of this License with every copy of the Source Code You
-     distribute. You may not offer or impose any terms on any Source Code
-     version that alters or restricts the applicable version of this
-     License or the recipients' rights hereunder. However, You may include
-     an additional document offering the additional rights described in
-     Section 3.5.
-
-     3.2. Availability of Source Code.
-     Any Modification which You create or to which You contribute must be
-     made available in Source Code form under the terms of this License
-     either on the same media as an Executable version or via an accepted
-     Electronic Distribution Mechanism to anyone to whom you made an
-     Executable version available; and if made available via Electronic
-     Distribution Mechanism, must remain available for at least twelve (12)
-     months after the date it initially became available, or at least six
-     (6) months after a subsequent version of that particular Modification
-     has been made available to such recipients. You are responsible for
-     ensuring that the Source Code version remains available even if the
-     Electronic Distribution Mechanism is maintained by a third party.
-
-     3.3. Description of Modifications.
-     You must cause all Covered Code to which You contribute to contain a
-     file documenting the changes You made to create that Covered Code and
-     the date of any change. You must include a prominent statement that
-     the Modification is derived, directly or indirectly, from Original
-     Code provided by the Initial Developer and including the name of the
-     Initial Developer in (a) the Source Code, and (b) in any notice in an
-     Executable version or related documentation in which You describe the
-     origin or ownership of the Covered Code.
-
-     3.4. Intellectual Property Matters
-          (a) Third Party Claims.
-          If Contributor has knowledge that a license under a third party's
-          intellectual property rights is required to exercise the rights
-          granted by such Contributor under Sections 2.1 or 2.2,
-          Contributor must include a text file with the Source Code
-          distribution titled "LEGAL" which describes the claim and the
-          party making the claim in sufficient detail that a recipient will
-          know whom to contact. If Contributor obtains such knowledge after
-          the Modification is made available as described in Section 3.2,
-          Contributor shall promptly modify the LEGAL file in all copies
-          Contributor makes available thereafter and shall take other steps
-          (such as notifying appropriate mailing lists or newsgroups)
-          reasonably calculated to inform those who received the Covered
-          Code that new knowledge has been obtained.
-
-          (b) Contributor APIs.
-          If Contributor's Modifications include an application programming
-          interface and Contributor has knowledge of patent licenses which
-          are reasonably necessary to implement that API, Contributor must
-          also include this information in the LEGAL file.
-
-               (c)    Representations.
-          Contributor represents that, except as disclosed pursuant to
-          Section 3.4(a) above, Contributor believes that Contributor's
-          Modifications are Contributor's original creation(s) and/or
-          Contributor has sufficient rights to grant the rights conveyed by
-          this License.
-
-     3.5. Required Notices.
-     You must duplicate the notice in Exhibit A in each file of the Source
-     Code.  If it is not possible to put such notice in a particular Source
-     Code file due to its structure, then You must include such notice in a
-     location (such as a relevant directory) where a user would be likely
-     to look for such a notice.  If You created one or more Modification(s)
-     You may add your name as a Contributor to the notice described in
-     Exhibit A.  You must also duplicate this License in any documentation
-     for the Source Code where You describe recipients' rights or ownership
-     rights relating to Covered Code.  You may choose to offer, and to
-     charge a fee for, warranty, support, indemnity or liability
-     obligations to one or more recipients of Covered Code. However, You
-     may do so only on Your own behalf, and not on behalf of the Initial
-     Developer or any Contributor. You must make it absolutely clear than
-     any such warranty, support, indemnity or liability obligation is
-     offered by You alone, and You hereby agree to indemnify the Initial
-     Developer and every Contributor for any liability incurred by the
-     Initial Developer or such Contributor as a result of warranty,
-     support, indemnity or liability terms You offer.
-
-     3.6. Distribution of Executable Versions.
-     You may distribute Covered Code in Executable form only if the
-     requirements of Section 3.1-3.5 have been met for that Covered Code,
-     and if You include a notice stating that the Source Code version of
-     the Covered Code is available under the terms of this License,
-     including a description of how and where You have fulfilled the
-     obligations of Section 3.2. The notice must be conspicuously included
-     in any notice in an Executable version, related documentation or
-     collateral in which You describe recipients' rights relating to the
-     Covered Code. You may distribute the Executable version of Covered
-     Code or ownership rights under a license of Your choice, which may
-     contain terms different from this License, provided that You are in
-     compliance with the terms of this License and that the license for the
-     Executable version does not attempt to limit or alter the recipient's
-     rights in the Source Code version from the rights set forth in this
-     License. If You distribute the Executable version under a different
-     license You must make it absolutely clear that any terms which differ
-     from this License are offered by You alone, not by the Initial
-     Developer or any Contributor. You hereby agree to indemnify the
-     Initial Developer and every Contributor for any liability incurred by
-     the Initial Developer or such Contributor as a result of any such
-     terms You offer.
-
-     3.7. Larger Works.
-     You may create a Larger Work by combining Covered Code with other code
-     not governed by the terms of this License and distribute the Larger
-     Work as a single product. In such a case, You must make sure the
-     requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
-     If it is impossible for You to comply with any of the terms of this
-     License with respect to some or all of the Covered Code due to
-     statute, judicial order, or regulation then You must: (a) comply with
-     the terms of this License to the maximum extent possible; and (b)
-     describe the limitations and the code they affect. Such description
-     must be included in the LEGAL file described in Section 3.4 and must
-     be included with all distributions of the Source Code. Except to the
-     extent prohibited by statute or regulation, such description must be
-     sufficiently detailed for a recipient of ordinary skill to be able to
-     understand it.
-
-5. Application of this License.
-
-     This License applies to code to which the Initial Developer has
-     attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
-     6.1. New Versions.
-     Netscape Communications Corporation ("Netscape") may publish revised
-     and/or new versions of the License from time to time. Each version
-     will be given a distinguishing version number.
-
-     6.2. Effect of New Versions.
-     Once Covered Code has been published under a particular version of the
-     License, You may always continue to use it under the terms of that
-     version. You may also choose to use such Covered Code under the terms
-     of any subsequent version of the License published by Netscape. No one
-     other than Netscape has the right to modify the terms applicable to
-     Covered Code created under this License.
-
-     6.3. Derivative Works.
-     If You create or use a modified version of this License (which you may
-     only do in order to apply it to code which is not already Covered Code
-     governed by this License), You must (a) rename Your license so that
-     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
-     "MPL", "NPL" or any confusingly similar phrase do not appear in your
-     license (except to note that your license differs from this License)
-     and (b) otherwise make it clear that Your version of the license
-     contains terms which differ from the Mozilla Public License and
-     Netscape Public License. (Filling in the name of the Initial
-     Developer, Original Code or Contributor in the notice described in
-     Exhibit A shall not of themselves be deemed to be modifications of
-     this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
-     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
-     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
-     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
-     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
-     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
-     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
-     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
-     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
-     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
-     8.1.  This License and the rights granted hereunder will terminate
-     automatically if You fail to comply with terms herein and fail to cure
-     such breach within 30 days of becoming aware of the breach. All
-     sublicenses to the Covered Code which are properly granted shall
-     survive any termination of this License. Provisions which, by their
-     nature, must remain in effect beyond the termination of this License
-     shall survive.
-
-     8.2.  If You initiate litigation by asserting a patent infringement
-     claim (excluding declatory judgment actions) against Initial Developer
-     or a Contributor (the Initial Developer or Contributor against whom
-     You file such action is referred to as "Participant")  alleging that:
-
-     (a)  such Participant's Contributor Version directly or indirectly
-     infringes any patent, then any and all rights granted by such
-     Participant to You under Sections 2.1 and/or 2.2 of this License
-     shall, upon 60 days notice from Participant terminate prospectively,
-     unless if within 60 days after receipt of notice You either: (i)
-     agree in writing to pay Participant a mutually agreeable reasonable
-     royalty for Your past and future use of Modifications made by such
-     Participant, or (ii) withdraw Your litigation claim with respect to
-     the Contributor Version against such Participant.  If within 60 days
-     of notice, a reasonable royalty and payment arrangement are not
-     mutually agreed upon in writing by the parties or the litigation claim
-     is not withdrawn, the rights granted by Participant to You under
-     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
-     the 60 day notice period specified above.
-
-     (b)  any software, hardware, or device, other than such Participant's
-     Contributor Version, directly or indirectly infringes any patent, then
-     any rights granted to You by such Participant under Sections 2.1(b)
-     and 2.2(b) are revoked effective as of the date You first made, used,
-     sold, distributed, or had made, Modifications made by that
-     Participant.
-
-     8.3.  If You assert a patent infringement claim against Participant
-     alleging that such Participant's Contributor Version directly or
-     indirectly infringes any patent where such claim is resolved (such as
-     by license or settlement) prior to the initiation of patent
-     infringement litigation, then the reasonable value of the licenses
-     granted by such Participant under Sections 2.1 or 2.2 shall be taken
-     into account in determining the amount or value of any payment or
-     license.
-
-     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
-     all end user license agreements (excluding distributors and resellers)
-     which have been validly granted by You or any distributor hereunder
-     prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
-     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
-     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
-     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
-     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
-     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
-     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
-     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
-     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
-     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
-     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
-     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
-     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
-     The Covered Code is a "commercial item," as that term is defined in
-     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-     software" and "commercial computer software documentation," as such
-     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
-     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
-     all U.S. Government End Users acquire Covered Code with only those
-     rights set forth herein.
-
-11. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject
-     matter hereof. If any provision of this License is held to be
-     unenforceable, such provision shall be reformed only to the extent
-     necessary to make it enforceable. This License shall be governed by
-     California law provisions (except to the extent applicable law, if
-     any, provides otherwise), excluding its conflict-of-law provisions.
-     With respect to disputes in which at least one party is a citizen of,
-     or an entity chartered or registered to do business in the United
-     States of America, any litigation relating to this License shall be
-     subject to the jurisdiction of the Federal Courts of the Northern
-     District of California, with venue lying in Santa Clara County,
-     California, with the losing party responsible for costs, including
-     without limitation, court costs and reasonable attorneys' fees and
-     expenses. The application of the United Nations Convention on
-     Contracts for the International Sale of Goods is expressly excluded.
-     Any law or regulation which provides that the language of a contract
-     shall be construed against the drafter shall not apply to this
-     License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is
-     responsible for claims and damages arising, directly or indirectly,
-     out of its utilization of rights under this License and You agree to
-     work with Initial Developer and Contributors to distribute such
-     responsibility on an equitable basis. Nothing herein is intended or
-     shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
-     Initial Developer may designate portions of the Covered Code as
-     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
-     Developer permits you to utilize portions of the Covered Code under
-     Your choice of the NPL or the alternative licenses, if any, specified
-     by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A -Mozilla Public License.
-
-     ``The contents of this file are subject to the Mozilla Public License
-     Version 1.1 (the "License"); you may not use this file except in
-     compliance with the License. You may obtain a copy of the License at
-     http://www.mozilla.org/MPL/
-
-     Software distributed under the License is distributed on an "AS IS"
-     basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-     License for the specific language governing rights and limitations
-     under the License.
-
-     The Original Code is RabbitMQ.
-
-     The Initial Developer of the Original Code is GoPivotal, Inc.
-     Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.''
-
-     [NOTE: The text of this Exhibit A may differ slightly from the text of
-     the notices in the Source Code files of the Original Code. You should
-     use the text of this Exhibit A rather than the text found in the
-     Original Code Source Code for Your Modifications.]
diff --git a/debian/README b/debian/README
deleted file mode 100644 (file)
index 0a29ee2..0000000
--- a/debian/README
+++ /dev/null
@@ -1,20 +0,0 @@
-This is rabbitmq-server, a message broker implementing AMQP, STOMP and MQTT.
-
-Most of the documentation for RabbitMQ is provided on the RabbitMQ web
-site. You can see documentation for the current version at:
-
-http://www.rabbitmq.com/documentation.html
-
-and for previous versions at:
-
-http://www.rabbitmq.com/previous.html
-
-Man pages are installed with this package. Of particular interest are
-rabbitmqctl(1), to interact with a running RabbitMQ server, and
-rabbitmq-plugins(1), to enable and disable plugins. These should be
-run as the superuser.
-
-An example configuration file is provided in the same directory as
-this README. Copy it to /etc/rabbitmq/rabbitmq.config to use it. The
-RabbitMQ server must be restarted after changing the configuration
-file or enabling or disabling plugins.
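
As a quick illustration of the workflow described in the README above (the doc path and the plugin name are assumptions based on the standard Debian layout, not taken from this change), activating the example configuration and a plugin might look like:

    # Assumed install location of the example config shipped next to the README
    sudo cp /usr/share/doc/rabbitmq-server/rabbitmq.config.example \
        /etc/rabbitmq/rabbitmq.config
    # rabbitmq_management is only an example plugin name
    sudo rabbitmq-plugins enable rabbitmq_management
    # The broker must be restarted for either change to take effect
    sudo service rabbitmq-server restart
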
diff --git a/debian/changelog b/debian/changelog
index 0bf370ebffb492d78d0aa4e0312da0fdd1164878..99269f9fedef1e0651c0288e1e02b9eabc351235 100644 (file)
@@ -1,3 +1,11 @@
+rabbitmq-server (3.5.4-1~u14.04+mos1) mos7.0; urgency=medium
+
+  * Pick the source from Debian Sid, changes:
+    - build-depend on default version of dh-systemd;
+    - changelog was taken from original package;
+
+ -- Aleksandr Mogylchenko <amogylchenko@mirantis.com>  Thu, 06 Aug 2015 23:00:41 +0200
+
 rabbitmq-server (3.3.5-1~u14.04+mos2) mos7.0; urgency=medium
 
   * Repackaged for 7.0
@@ -343,4 +351,3 @@ rabbitmq-server (1.0.0-alpha-1) unstable; urgency=low
   * Initial release
 
  -- Tony Garnock-Jones <tonyg@shortstop.lshift.net>  Wed, 31 Jan 2007 19:06:33 +0000
-
diff --git a/debian/compat b/debian/compat
index 45a4fb75db864000d01701c0f7a51864bd4daabf..ec635144f60048986bc560c5576355344005e6e7 100644 (file)
@@ -1 +1 @@
-8
+9
diff --git a/debian/control b/debian/control
index 944fae4a92e283293af82f2a763316c4f781bc92..81e6f6a1bd5e66829a89c3833351a763d006f1de 100644 (file)
@@ -1,16 +1,31 @@
 Source: rabbitmq-server
 Section: net
 Priority: extra
-Maintainer: MOS Linux team <mos-linux@mirantis.com>
-XSBC-Orig-Maintainer: RabbitMQ Team <packaging@rabbitmq.com>
-Build-Depends: cdbs, debhelper (>= 9), erlang-dev, python-simplejson, xmlto, xsltproc, erlang-nox (>= 1:13.b.3), erlang-src (>= 1:13.b.3), unzip, zip
-Standards-Version: 3.9.2
+Maintainer: MOS Linux team <mos-linux-team@mirantis.com>
+XSBC-Orig-Maintainer: PKG OpenStack <openstack-devel@lists.alioth.debian.org>
+Uploaders: James Page <james.page@ubuntu.com>, Thomas Goirand <zigo@debian.org>
+Build-Depends: debhelper (>= 9~),
+               dh-systemd (>= 1.14~),
+               erlang-dev,
+               erlang-nox (>= 1:13.b.3),
+               erlang-src (>= 1:13.b.3),
+               python-simplejson,
+               unzip,
+               xmlto,
+               xsltproc,
+               zip
+Standards-Version: 3.9.6
+Vcs-Browser: http://anonscm.debian.org/gitweb/?p=openstack/rabbitmq-server.git
+Vcs-Git: git://anonscm.debian.org/openstack/rabbitmq-server.git
+Homepage: http://www.rabbitmq.com/
 
 Package: rabbitmq-server
 Architecture: all
-Depends: erlang-nox (>= 1:13.b.3) | esl-erlang, adduser, logrotate, ${misc:Depends}
+Depends: adduser,
+         erlang-nox (>= 1:13.b.3) | esl-erlang,
+         logrotate,
+         ${misc:Depends}
 Description: AMQP server written in Erlang
  RabbitMQ is an implementation of AMQP, the emerging standard for high
  performance enterprise messaging. The RabbitMQ server is a robust and
  scalable implementation of an AMQP broker.
-Homepage: http://www.rabbitmq.com/
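
With the reworked Build-Depends above installed, the source package builds with stock Debian tooling; a minimal sketch using standard dpkg-dev/devscripts commands (illustrative only, not part of this change):

    # Install the build dependencies declared in debian/control,
    # then build unsigned binary packages
    mk-build-deps --install --remove debian/control
    dpkg-buildpackage -us -uc -b
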
diff --git a/debian/copyright b/debian/copyright
index e384a7c8f8b99692735ab627b9e222bd2b95f3f3..3ce58ffa780c25bfb974d8bcb8063b0bfecb2531 100644 (file)
-This package was debianized by Tony Garnock-Jones <tonyg@rabbitmq.com> on
-Wed,  3 Jan 2007 15:43:44 +0000.
-
-It was downloaded from http://www.rabbitmq.com/
-
-
-This package, the RabbitMQ server is licensed under the MPL.
-
-If you have any questions regarding licensing, please contact us at
-info@rabbitmq.com.
-
-The files amqp-rabbitmq-0.8.json and amqp-rabbitmq-0.9.1.json are
-"Copyright (C) 2008-2013 GoPivotal", Inc. and are covered by the MIT
-license.
-
-jQuery is "Copyright (c) 2010 John Resig" and is covered by the MIT
-license.  It was downloaded from http://jquery.com/
-
-EJS is "Copyright (c) 2007 Edward Benson" and is covered by the MIT
-license.  It was downloaded from http://embeddedjs.com/
-
-Sammy is "Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC" and is
-covered by the MIT license.  It was downloaded from
-http://code.quirkey.com/sammy/
-
-ExplorerCanvas is "Copyright 2006 Google Inc" and is covered by the
-Apache License version 2.0. It was downloaded from
-http://code.google.com/p/explorercanvas/
-
-Flot is "Copyright (c) 2007-2013 IOLA and Ole Laursen" and is covered
-by the MIT license. It was downloaded from
-http://www.flotcharts.org/
-Webmachine is Copyright (c) Basho Technologies and is covered by the
-Apache License 2.0.  It was downloaded from http://webmachine.basho.com/
-
-Eldap is "Copyright (c) 2010, Torbjorn Tornkvist" and is covered by
-the MIT license.  It was downloaded from https://github.com/etnt/eldap
-
-Mochiweb is "Copyright (c) 2007 Mochi Media, Inc." and is covered by
-the MIT license.  It was downloaded from
-http://github.com/mochi/mochiweb/
-
-glMatrix is "Copyright (c) 2011, Brandon Jones" and is covered by the
-BSD 2-Clause license.  It was downloaded from
-http://code.google.com/p/glmatrix/
-
-
-The MIT license is as follows:
-
-        "Permission is hereby granted, free of charge, to any person
-        obtaining a copy of this file (the Software), to deal in the
-        Software without restriction, including without limitation the
-        rights to use, copy, modify, merge, publish, distribute,
-        sublicense, and/or sell copies of the Software, and to permit
-        persons to whom the Software is furnished to do so, subject to
-        the following conditions:
-
-        The above copyright notice and this permission notice shall be
-        included in all copies or substantial portions of the Software.
-
-        THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-        EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-        OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-        NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-        HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-        WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-        FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-        OTHER DEALINGS IN THE SOFTWARE."
-
-
-The BSD 2-Clause license is as follows:
-
-        "Redistribution and use in source and binary forms, with or
-        without modification, are permitted provided that the
-        following conditions are met:
-
-        1. Redistributions of source code must retain the above
-        copyright notice, this list of conditions and the following
-        disclaimer.
-
-        2. Redistributions in binary form must reproduce the above
-        copyright notice, this list of conditions and the following
-        disclaimer in the documentation and/or other materials
-        provided with the distribution.
-
-        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-        CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
-        INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-        MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-        CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-        SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-        NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-        LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-        HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-        CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-        OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-        EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
-
-
-The rest of this package is licensed under the Mozilla Public License 1.1
-Authors and Copyright are as described below:
-
-     The Initial Developer of the Original Code is GoPivotal, Inc.
-     Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-
-
-                          MOZILLA PUBLIC LICENSE
-                                Version 1.1
-
-                              ---------------
-
-1. Definitions.
-
-     1.0.1. "Commercial Use" means distribution or otherwise making the
-     Covered Code available to a third party.
-
-     1.1. "Contributor" means each entity that creates or contributes to
-     the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original
-     Code, prior Modifications used by a Contributor, and the Modifications
-     made by that particular Contributor.
-
-     1.3. "Covered Code" means the Original Code or Modifications or the
-     combination of the Original Code and Modifications, in each case
-     including portions thereof.
-
-     1.4. "Electronic Distribution Mechanism" means a mechanism generally
-     accepted in the software development community for the electronic
-     transfer of data.
-
-     1.5. "Executable" means Covered Code in any form other than Source
-     Code.
-
-     1.6. "Initial Developer" means the individual or entity identified
-     as the Initial Developer in the Source Code notice required by Exhibit
-     A.
-
-     1.7. "Larger Work" means a work which combines Covered Code or
-     portions thereof with code not governed by the terms of this License.
-
-     1.8. "License" means this document.
-
-     1.8.1. "Licensable" means having the right to grant, to the maximum
-     extent possible, whether at the time of the initial grant or
-     subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means any addition to or deletion from the
-     substance or structure of either the Original Code or any previous
-     Modifications. When Covered Code is released as a series of files, a
-     Modification is:
-          A. Any addition to or deletion from the contents of a file
-          containing Original Code or previous Modifications.
-
-          B. Any new file that contains any part of the Original Code or
-          previous Modifications.
-
-     1.10. "Original Code" means Source Code of computer software code
-     which is described in the Source Code notice required by Exhibit A as
-     Original Code, and which, at the time of its release under this
-     License is not already Covered Code governed by this License.
-
-     1.10.1. "Patent Claims" means any patent claim(s), now owned or
-     hereafter acquired, including without limitation,  method, process,
-     and apparatus claims, in any patent Licensable by grantor.
-
-     1.11. "Source Code" means the preferred form of the Covered Code for
-     making modifications to it, including all modules it contains, plus
-     any associated interface definition files, scripts used to control
-     compilation and installation of an Executable, or source code
-     differential comparisons against either the Original Code or another
-     well known, available Covered Code of the Contributor's choice. The
-     Source Code can be in a compressed or archival form, provided the
-     appropriate decompression or de-archiving software is widely available
-     for no charge.
-
-     1.12. "You" (or "Your")  means an individual or a legal entity
-     exercising rights under, and complying with all of the terms of, this
-     License or a future version of this License issued under Section 6.1.
-     For legal entities, "You" includes any entity which controls, is
-     controlled by, or is under common control with You. For purposes of
-     this definition, "control" means (a) the power, direct or indirect,
-     to cause the direction or management of such entity, whether by
-     contract or otherwise, or (b) ownership of more than fifty percent
-     (50%) of the outstanding shares or beneficial ownership of such
-     entity.
-
-2. Source Code License.
-
-     2.1. The Initial Developer Grant.
-     The Initial Developer hereby grants You a world-wide, royalty-free,
-     non-exclusive license, subject to third party intellectual property
-     claims:
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Initial Developer to use, reproduce,
-          modify, display, perform, sublicense and distribute the Original
-          Code (or portions thereof) with or without Modifications, and/or
-          as part of a Larger Work; and
-
-          (b) under Patents Claims infringed by the making, using or
-          selling of Original Code, to make, have made, use, practice,
-          sell, and offer for sale, and/or otherwise dispose of the
-          Original Code (or portions thereof).
-
-          (c) the licenses granted in this Section 2.1(a) and (b) are
-          effective on the date Initial Developer first distributes
-          Original Code under the terms of this License.
-
-          (d) Notwithstanding Section 2.1(b) above, no patent license is
-          granted: 1) for code that You delete from the Original Code; 2)
-          separate from the Original Code;  or 3) for infringements caused
-          by: i) the modification of the Original Code or ii) the
-          combination of the Original Code with other software or devices.
-
-     2.2. Contributor Grant.
-     Subject to third party intellectual property claims, each Contributor
-     hereby grants You a world-wide, royalty-free, non-exclusive license
-
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Contributor, to use, reproduce, modify,
-          display, perform, sublicense and distribute the Modifications
-          created by such Contributor (or portions thereof) either on an
-          unmodified basis, with other Modifications, as Covered Code
-          and/or as part of a Larger Work; and
-
-          (b) under Patent Claims infringed by the making, using, or
-          selling of  Modifications made by that Contributor either alone
-          and/or in combination with its Contributor Version (or portions
-          of such combination), to make, use, sell, offer for sale, have
-          made, and/or otherwise dispose of: 1) Modifications made by that
-          Contributor (or portions thereof); and 2) the combination of
-          Modifications made by that Contributor with its Contributor
-          Version (or portions of such combination).
-
-          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
-          effective on the date Contributor first makes Commercial Use of
-          the Covered Code.
-
-          (d)    Notwithstanding Section 2.2(b) above, no patent license is
-          granted: 1) for any code that Contributor has deleted from the
-          Contributor Version; 2)  separate from the Contributor Version;
-          3)  for infringements caused by: i) third party modifications of
-          Contributor Version or ii)  the combination of Modifications made
-          by that Contributor with other software  (except as part of the
-          Contributor Version) or other devices; or 4) under Patent Claims
-          infringed by Covered Code in the absence of Modifications made by
-          that Contributor.
-
-3. Distribution Obligations.
-
-     3.1. Application of License.
-     The Modifications which You create or to which You contribute are
-     governed by the terms of this License, including without limitation
-     Section 2.2. The Source Code version of Covered Code may be
-     distributed only under the terms of this License or a future version
-     of this License released under Section 6.1, and You must include a
-     copy of this License with every copy of the Source Code You
-     distribute. You may not offer or impose any terms on any Source Code
-     version that alters or restricts the applicable version of this
-     License or the recipients' rights hereunder. However, You may include
-     an additional document offering the additional rights described in
-     Section 3.5.
-
-     3.2. Availability of Source Code.
-     Any Modification which You create or to which You contribute must be
-     made available in Source Code form under the terms of this License
-     either on the same media as an Executable version or via an accepted
-     Electronic Distribution Mechanism to anyone to whom you made an
-     Executable version available; and if made available via Electronic
-     Distribution Mechanism, must remain available for at least twelve (12)
-     months after the date it initially became available, or at least six
-     (6) months after a subsequent version of that particular Modification
-     has been made available to such recipients. You are responsible for
-     ensuring that the Source Code version remains available even if the
-     Electronic Distribution Mechanism is maintained by a third party.
-
-     3.3. Description of Modifications.
-     You must cause all Covered Code to which You contribute to contain a
-     file documenting the changes You made to create that Covered Code and
-     the date of any change. You must include a prominent statement that
-     the Modification is derived, directly or indirectly, from Original
-     Code provided by the Initial Developer and including the name of the
-     Initial Developer in (a) the Source Code, and (b) in any notice in an
-     Executable version or related documentation in which You describe the
-     origin or ownership of the Covered Code.
-
-     3.4. Intellectual Property Matters
-          (a) Third Party Claims.
-          If Contributor has knowledge that a license under a third party's
-          intellectual property rights is required to exercise the rights
-          granted by such Contributor under Sections 2.1 or 2.2,
-          Contributor must include a text file with the Source Code
-          distribution titled "LEGAL" which describes the claim and the
-          party making the claim in sufficient detail that a recipient will
-          know whom to contact. If Contributor obtains such knowledge after
-          the Modification is made available as described in Section 3.2,
-          Contributor shall promptly modify the LEGAL file in all copies
-          Contributor makes available thereafter and shall take other steps
-          (such as notifying appropriate mailing lists or newsgroups)
-          reasonably calculated to inform those who received the Covered
-          Code that new knowledge has been obtained.
-
-          (b) Contributor APIs.
-          If Contributor's Modifications include an application programming
-          interface and Contributor has knowledge of patent licenses which
-          are reasonably necessary to implement that API, Contributor must
-          also include this information in the LEGAL file.
-
-               (c)    Representations.
-          Contributor represents that, except as disclosed pursuant to
-          Section 3.4(a) above, Contributor believes that Contributor's
-          Modifications are Contributor's original creation(s) and/or
-          Contributor has sufficient rights to grant the rights conveyed by
-          this License.
-
-     3.5. Required Notices.
-     You must duplicate the notice in Exhibit A in each file of the Source
-     Code.  If it is not possible to put such notice in a particular Source
-     Code file due to its structure, then You must include such notice in a
-     location (such as a relevant directory) where a user would be likely
-     to look for such a notice.  If You created one or more Modification(s)
-     You may add your name as a Contributor to the notice described in
-     Exhibit A.  You must also duplicate this License in any documentation
-     for the Source Code where You describe recipients' rights or ownership
-     rights relating to Covered Code.  You may choose to offer, and to
-     charge a fee for, warranty, support, indemnity or liability
-     obligations to one or more recipients of Covered Code. However, You
-     may do so only on Your own behalf, and not on behalf of the Initial
-     Developer or any Contributor. You must make it absolutely clear than
-     any such warranty, support, indemnity or liability obligation is
-     offered by You alone, and You hereby agree to indemnify the Initial
-     Developer and every Contributor for any liability incurred by the
-     Initial Developer or such Contributor as a result of warranty,
-     support, indemnity or liability terms You offer.
-
-     3.6. Distribution of Executable Versions.
-     You may distribute Covered Code in Executable form only if the
-     requirements of Section 3.1-3.5 have been met for that Covered Code,
-     and if You include a notice stating that the Source Code version of
-     the Covered Code is available under the terms of this License,
-     including a description of how and where You have fulfilled the
-     obligations of Section 3.2. The notice must be conspicuously included
-     in any notice in an Executable version, related documentation or
-     collateral in which You describe recipients' rights relating to the
-     Covered Code. You may distribute the Executable version of Covered
-     Code or ownership rights under a license of Your choice, which may
-     contain terms different from this License, provided that You are in
-     compliance with the terms of this License and that the license for the
-     Executable version does not attempt to limit or alter the recipient's
-     rights in the Source Code version from the rights set forth in this
-     License. If You distribute the Executable version under a different
-     license You must make it absolutely clear that any terms which differ
-     from this License are offered by You alone, not by the Initial
-     Developer or any Contributor. You hereby agree to indemnify the
-     Initial Developer and every Contributor for any liability incurred by
-     the Initial Developer or such Contributor as a result of any such
-     terms You offer.
-
-     3.7. Larger Works.
-     You may create a Larger Work by combining Covered Code with other code
-     not governed by the terms of this License and distribute the Larger
-     Work as a single product. In such a case, You must make sure the
-     requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
-     If it is impossible for You to comply with any of the terms of this
-     License with respect to some or all of the Covered Code due to
-     statute, judicial order, or regulation then You must: (a) comply with
-     the terms of this License to the maximum extent possible; and (b)
-     describe the limitations and the code they affect. Such description
-     must be included in the LEGAL file described in Section 3.4 and must
-     be included with all distributions of the Source Code. Except to the
-     extent prohibited by statute or regulation, such description must be
-     sufficiently detailed for a recipient of ordinary skill to be able to
-     understand it.
-
-5. Application of this License.
-
-     This License applies to code to which the Initial Developer has
-     attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
-     6.1. New Versions.
-     Netscape Communications Corporation ("Netscape") may publish revised
-     and/or new versions of the License from time to time. Each version
-     will be given a distinguishing version number.
-
-     6.2. Effect of New Versions.
-     Once Covered Code has been published under a particular version of the
-     License, You may always continue to use it under the terms of that
-     version. You may also choose to use such Covered Code under the terms
-     of any subsequent version of the License published by Netscape. No one
-     other than Netscape has the right to modify the terms applicable to
-     Covered Code created under this License.
-
-     6.3. Derivative Works.
-     If You create or use a modified version of this License (which you may
-     only do in order to apply it to code which is not already Covered Code
-     governed by this License), You must (a) rename Your license so that
-     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
-     "MPL", "NPL" or any confusingly similar phrase do not appear in your
-     license (except to note that your license differs from this License)
-     and (b) otherwise make it clear that Your version of the license
-     contains terms which differ from the Mozilla Public License and
-     Netscape Public License. (Filling in the name of the Initial
-     Developer, Original Code or Contributor in the notice described in
-     Exhibit A shall not of themselves be deemed to be modifications of
-     this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
-     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
-     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
-     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
-     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
-     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
-     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
-     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
-     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
-     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
-     8.1.  This License and the rights granted hereunder will terminate
-     automatically if You fail to comply with terms herein and fail to cure
-     such breach within 30 days of becoming aware of the breach. All
-     sublicenses to the Covered Code which are properly granted shall
-     survive any termination of this License. Provisions which, by their
-     nature, must remain in effect beyond the termination of this License
-     shall survive.
-
-     8.2.  If You initiate litigation by asserting a patent infringement
-     claim (excluding declatory judgment actions) against Initial Developer
-     or a Contributor (the Initial Developer or Contributor against whom
-     You file such action is referred to as "Participant")  alleging that:
-
-     (a)  such Participant's Contributor Version directly or indirectly
-     infringes any patent, then any and all rights granted by such
-     Participant to You under Sections 2.1 and/or 2.2 of this License
-     shall, upon 60 days notice from Participant terminate prospectively,
-     unless if within 60 days after receipt of notice You either: (i)
-     agree in writing to pay Participant a mutually agreeable reasonable
-     royalty for Your past and future use of Modifications made by such
-     Participant, or (ii) withdraw Your litigation claim with respect to
-     the Contributor Version against such Participant.  If within 60 days
-     of notice, a reasonable royalty and payment arrangement are not
-     mutually agreed upon in writing by the parties or the litigation claim
-     is not withdrawn, the rights granted by Participant to You under
-     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
-     the 60 day notice period specified above.
-
-     (b)  any software, hardware, or device, other than such Participant's
-     Contributor Version, directly or indirectly infringes any patent, then
-     any rights granted to You by such Participant under Sections 2.1(b)
-     and 2.2(b) are revoked effective as of the date You first made, used,
-     sold, distributed, or had made, Modifications made by that
-     Participant.
-
-     8.3.  If You assert a patent infringement claim against Participant
-     alleging that such Participant's Contributor Version directly or
-     indirectly infringes any patent where such claim is resolved (such as
-     by license or settlement) prior to the initiation of patent
-     infringement litigation, then the reasonable value of the licenses
-     granted by such Participant under Sections 2.1 or 2.2 shall be taken
-     into account in determining the amount or value of any payment or
-     license.
-
-     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
-     all end user license agreements (excluding distributors and resellers)
-     which have been validly granted by You or any distributor hereunder
-     prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
-     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
-     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
-     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
-     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
-     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
-     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
-     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
-     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
-     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
-     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
-     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
-     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
-     The Covered Code is a "commercial item," as that term is defined in
-     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-     software" and "commercial computer software documentation," as such
-     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
-     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
-     all U.S. Government End Users acquire Covered Code with only those
-     rights set forth herein.
-
-11. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject
-     matter hereof. If any provision of this License is held to be
-     unenforceable, such provision shall be reformed only to the extent
-     necessary to make it enforceable. This License shall be governed by
-     California law provisions (except to the extent applicable law, if
-     any, provides otherwise), excluding its conflict-of-law provisions.
-     With respect to disputes in which at least one party is a citizen of,
-     or an entity chartered or registered to do business in the United
-     States of America, any litigation relating to this License shall be
-     subject to the jurisdiction of the Federal Courts of the Northern
-     District of California, with venue lying in Santa Clara County,
-     California, with the losing party responsible for costs, including
-     without limitation, court costs and reasonable attorneys' fees and
-     expenses. The application of the United Nations Convention on
-     Contracts for the International Sale of Goods is expressly excluded.
-     Any law or regulation which provides that the language of a contract
-     shall be construed against the drafter shall not apply to this
-     License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is
-     responsible for claims and damages arising, directly or indirectly,
-     out of its utilization of rights under this License and You agree to
-     work with Initial Developer and Contributors to distribute such
-     responsibility on an equitable basis. Nothing herein is intended or
-     shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
-     Initial Developer may designate portions of the Covered Code as
-     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
-     Developer permits you to utilize portions of the Covered Code under
-     Your choice of the NPL or the alternative licenses, if any, specified
-     by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A -Mozilla Public License.
-
-     ``The contents of this file are subject to the Mozilla Public License
-     Version 1.1 (the "License"); you may not use this file except in
-     compliance with the License. You may obtain a copy of the License at
-     http://www.mozilla.org/MPL/
-
-     Software distributed under the License is distributed on an "AS IS"
-     basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-     License for the specific language governing rights and limitations
-     under the License.
-
-     The Original Code is RabbitMQ.
-
-     The Initial Developer of the Original Code is GoPivotal, Inc.
-     Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.''
-
-     [NOTE: The text of this Exhibit A may differ slightly from the text of
-     the notices in the Source Code files of the Original Code. You should
-     use the text of this Exhibit A rather than the text found in the
-     Original Code Source Code for Your Modifications.]
-
-
-The Debian packaging is (C) 2007-2013, GoPivotal, Inc. and is licensed
-under the MPL 1.1, see above.
-
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: RabbitMQ
+Source: http://www.rabbitmq.com/
+
+Files: debian/*
+Copyright: (c) 2007-2013, GoPivotal, Inc.
+           (c) 2007, Tony Garnock-Jones <tonyg@rabbitmq.com>
+           (c) 2014, Blair Hester <bhester@gopivotal.com>
+           (c) 2012-2014, Emile Joubert <emile@rabbitmq.com>
+           (c) 2008-2012, John Leuner <jewel@debian.org>
+           (c) 2014, James Page <james.page@canonical.com>
+           (c) 2014, Thomas Goirand <zigo@debian.org>
+License: MPL-1.1
+
+Files: codegen/amqp-rabbitmq-*.json
+Copyright: (c) 2008-2013, GoPivotal Inc.
+License: Expat
+
+Files: plugins-src/rabbitmq-management/priv/www/js/jquery*.js
+Copyright: (c) 2010 John Resig
+License: Expat
+Comments: Downloaded from http://jquery.com/
+
+Files: plugins-src/rabbitmq-management/priv/www/js/ejs*
+ plugins-src/rabbitmq-management/priv/www/js/tmpl
+Copyright: (c) 2007, Edward Benson
+License: Expat
+Comments: downloaded from http://embeddedjs.com/
+
+Files: plugins-src/rabbitmq-management/priv/www/js/sammy*.js
+Copyright: (c) 2008 Aaron Quint, Quirkey NYC, LLC
+License: Expat
+Comments: Downloaded from http://code.quirkey.com/sammy/
+
+Files: plugins-src/rabbitmq-management/priv/www/js/excanvas*.js
+Copyright: (c) 2006, Google Inc
+License: Apache-2.0
+Comments: Downloaded from http://code.google.com/p/explorercanvas/
+
+Files: plugins-src/rabbitmq-management/priv/www/js/jquery.flot*.js
+Copyright: (c) 2007-2013, IOLA and Ole Laursen
+License: Expat
+Comments: Downloaded from http://www.flotcharts.org/
+
+Files: plugins-src/webmachine-wrapper/*
+Copyright: (c) Basho Technologies
+License: Apache-2.0
+Comments: Downloaded from http://webmachine.basho.com/
+
+Files: plugins-src/eldap-wrapper/*
+Copyright: (c) 2010, Torbjorn Tornkvist
+License: Expat
+Comments: Downloaded from https://github.com/etnt/eldap
+
+Files: plugins-src/mochiweb-wrapper/mochiweb-git/*
+Copyright: (c) 2007, Mochi Media, Inc.
+License: Expat
+Comments: Downloaded from http://github.com/mochi/mochiweb/
+
+Files: 
+ plugins-src/rabbitmq-management-visualiser/priv/www/visualiser/js/glMatrix*.js
+Copyright: (c) 2011, Brandon Jones
+License: BSD-2-Clause
+Comments: Downloaded from http://code.google.com/p/glmatrix/
+
+Files: *
+Copyright: (c) 2007-2014 GoPivotal, Inc.
+License: MPL-1.1
+
+License: Expat
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this file (the Software), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to permit
+ persons to whom the Software is furnished to do so, subject to the following
+ conditions:
+ .
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ IN THE SOFTWARE."
+
+License: BSD-2-Clause
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ .
+  1. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+ .
+  2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE."
+
+License: MPL-1.1
+ MOZILLA PUBLIC LICENSE Version 1.1
+ 1. Definitions.
+ .
+ 1.0.1. "Commercial Use" means distribution or otherwise making the Covered
+ Code available to a third party.
+ .
+ 1.1. "Contributor" means each entity that creates or contributes to the
+ creation of Modifications.
+ .
+ 1.2. "Contributor Version" means the combination of the Original Code, prior
+ Modifications used by a Contributor, and the Modifications made by that
+ particular Contributor.
+ .
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case including
+ portions thereof.
+ .
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally accepted
+ in the software development community for the electronic transfer of data.
+ .
+ 1.5. "Executable" means Covered Code in any form other than Source Code.
+ .
+ 1.6. "Initial Developer" means the individual or entity identified as the
+ Initial Developer in the Source Code notice required by Exhibit A.
+ .
+ 1.7. "Larger Work" means a work which combines Covered Code or portions
+ thereof with code not governed by the terms of this License.
+ .
+ 1.8. "License" means this document.
+ .
+ 1.8.1. "Licensable" means having the right to grant, to the maximum extent
+ possible, whether at the time of the initial grant or subsequently acquired,
+ any and all of the rights conveyed herein.
+ .
+ 1.9. "Modifications" means any addition to or deletion from the substance or
+ structure of either the Original Code or any previous Modifications. When
+ Covered Code is released as a series of files, a Modification is:
+ .
+ A. Any addition to or deletion from the contents of a file containing
+    Original Code or previous Modifications.
+ .
+ B. Any new file that contains any part of the Original Code or previous
+    Modifications.
+ .
+ 1.10. "Original Code" means Source Code of computer software code which is
+ described in the Source Code notice required by Exhibit A as Original Code,
+ and which, at the time of its release under this License is not already
+ Covered Code governed by this License.
+ .
+ 1.10.1. "Patent Claims" means any patent claim(s), now owned or hereafter
+ acquired, including without limitation,  method, process, and apparatus
+ claims, in any patent Licensable by grantor.
+ .
+ 1.11. "Source Code" means the preferred form of the Covered Code for making
+ modifications to it, including all modules it contains, plus any associated
+ interface definition files, scripts used to control compilation and
+ installation of an Executable, or source code differential comparisons
+ against either the Original Code or another well known, available Covered
+ Code of the Contributor's choice. The Source Code can be in a compressed or
+ archival form, provided the appropriate decompression or de-archiving
+ software is widely available for no charge.
+ .
+ 1.12. "You" (or "Your")  means an individual or a legal entity exercising
+ rights under, and complying with all of the terms of, this License or a
+ future version of this License issued under Section 6.1. For legal entities,
+ "You" includes any entity which controls, is controlled by, or is under
+ common control with You. For purposes of this definition, "control" means (a)
+ the power, direct or indirect, to cause the direction or management of such
+ entity, whether by contract or otherwise, or (b) ownership of more than fifty
+ percent (50%) of the outstanding shares or beneficial ownership of such
+ entity.
+ .
+ 2. Source Code License.
+ .
+ 2.1. The Initial Developer Grant.
+ The Initial Developer hereby grants You a world-wide, royalty-free,
+ non-exclusive license, subject to third party intellectual property claims:
+ .
+ (a) under intellectual property rights (other than patent or
+     trademark) Licensable by Initial Developer to use, reproduce,
+     modify, display, perform, sublicense and distribute the Original
+     Code (or portions thereof) with or without Modifications, and/or
+     as part of a Larger Work; and
+ .
+ (b) under Patents Claims infringed by the making, using or
+     selling of Original Code, to make, have made, use, practice,
+     sell, and offer for sale, and/or otherwise dispose of the
+     Original Code (or portions thereof).
+ .
+ (c) the licenses granted in this Section 2.1(a) and (b) are
+     effective on the date Initial Developer first distributes
+     Original Code under the terms of this License.
+ .
+ (d) Notwithstanding Section 2.1(b) above, no patent license is
+     granted: 1) for code that You delete from the Original Code; 2)
+     separate from the Original Code;  or 3) for infringements caused
+     by: i) the modification of the Original Code or ii) the
+     combination of the Original Code with other software or devices.
+ .
+ 2.2. Contributor Grant.
+ Subject to third party intellectual property claims, each Contributor hereby
+ grants You a world-wide, royalty-free, non-exclusive license
+ .
+ (a) under intellectual property rights (other than patent or
+     trademark) Licensable by Contributor, to use, reproduce, modify,
+     display, perform, sublicense and distribute the Modifications
+     created by such Contributor (or portions thereof) either on an
+     unmodified basis, with other Modifications, as Covered Code
+     and/or as part of a Larger Work; and
+ .
+ (b) under Patent Claims infringed by the making, using, or
+     selling of  Modifications made by that Contributor either alone
+     and/or in combination with its Contributor Version (or portions
+     of such combination), to make, use, sell, offer for sale, have
+     made, and/or otherwise dispose of: 1) Modifications made by that
+     Contributor (or portions thereof); and 2) the combination of
+     Modifications made by that Contributor with its Contributor
+     Version (or portions of such combination).
+ .
+ (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+     effective on the date Contributor first makes Commercial Use of
+     the Covered Code.
+ .
+ (d) Notwithstanding Section 2.2(b) above, no patent license is
+     granted: 1) for any code that Contributor has deleted from the
+     Contributor Version; 2)  separate from the Contributor Version;
+     3)  for infringements caused by: i) third party modifications of
+     Contributor Version or ii)  the combination of Modifications made
+     by that Contributor with other software  (except as part of the
+     Contributor Version) or other devices; or 4) under Patent Claims
+     infringed by Covered Code in the absence of Modifications made by
+     that Contributor.
+ .
+ 3. Distribution Obligations.
+ .
+ 3.1. Application of License.
+ .
+ The Modifications which You create or to which You contribute are governed by
+ the terms of this License, including without limitation Section 2.2. The
+ Source Code version of Covered Code may be distributed only under the terms of
+ this License or a future version of this License released under Section 6.1,
+ and You must include a copy of this License with every copy of the Source Code
+ You distribute. You may not offer or impose any terms on any Source Code
+ version that alters or restricts the applicable version of this License or the
+ recipients' rights hereunder. However, You may include an additional document
+ offering the additional rights described in Section 3.5.
+ .
+ 3.2. Availability of Source Code.
+ .
+ Any Modification which You create or to which You contribute must be made
+ available in Source Code form under the terms of this License either on the
+ same media as an Executable version or via an accepted Electronic Distribution
+ Mechanism to anyone to whom you made an Executable version available; and if
+ made available via Electronic Distribution Mechanism, must remain available
+ for at least twelve (12) months after the date it initially became available,
+ or at least six (6) months after a subsequent version of that particular
+ Modification has been made available to such recipients. You are responsible
+ for ensuring that the Source Code version remains available even if the
+ Electronic Distribution Mechanism is maintained by a third party.
+ .
+ 3.3. Description of Modifications.
+ .
+ You must cause all Covered Code to which You contribute to contain a file
+ documenting the changes You made to create that Covered Code and the date of
+ any change. You must include a prominent statement that the Modification is
+ derived, directly or indirectly, from Original Code provided by the Initial
+ Developer and including the name of the Initial Developer in (a) the Source
+ Code, and (b) in any notice in an Executable version or related documentation
+ in which You describe the origin or ownership of the Covered Code.
+ .
+ 3.4. Intellectual Property Matters
+ .
+ (a) Third Party Claims.
+ .
+ If Contributor has knowledge that a license under a third party's
+ intellectual property rights is required to exercise the rights
+ granted by such Contributor under Sections 2.1 or 2.2,
+ Contributor must include a text file with the Source Code
+ distribution titled "LEGAL" which describes the claim and the
+ party making the claim in sufficient detail that a recipient will
+ know whom to contact. If Contributor obtains such knowledge after
+ the Modification is made available as described in Section 3.2,
+ Contributor shall promptly modify the LEGAL file in all copies
+ Contributor makes available thereafter and shall take other steps
+ (such as notifying appropriate mailing lists or newsgroups)
+ reasonably calculated to inform those who received the Covered
+ Code that new knowledge has been obtained.
+ .
+ (b) Contributor APIs.
+ .
+ If Contributor's Modifications include an application programming
+ interface and Contributor has knowledge of patent licenses which
+ are reasonably necessary to implement that API, Contributor must
+ also include this information in the LEGAL file.
+ .
+ (c) Representations.
+ .
+ Contributor represents that, except as disclosed pursuant to
+ Section 3.4(a) above, Contributor believes that Contributor's
+ Modifications are Contributor's original creation(s) and/or
+ Contributor has sufficient rights to grant the rights conveyed by
+ this License.
+ .
+ 3.5. Required Notices.
+ .
+ You must duplicate the notice in Exhibit A in each file of the Source
+ Code.  If it is not possible to put such notice in a particular Source
+ Code file due to its structure, then You must include such notice in a
+ location (such as a relevant directory) where a user would be likely
+ to look for such a notice.  If You created one or more Modification(s)
+ You may add your name as a Contributor to the notice described in
+ Exhibit A.  You must also duplicate this License in any documentation
+ for the Source Code where You describe recipients' rights or ownership
+ rights relating to Covered Code.  You may choose to offer, and to
+ charge a fee for, warranty, support, indemnity or liability
+ obligations to one or more recipients of Covered Code. However, You
+ may do so only on Your own behalf, and not on behalf of the Initial
+ Developer or any Contributor. You must make it absolutely clear than
+ any such warranty, support, indemnity or liability obligation is
+ offered by You alone, and You hereby agree to indemnify the Initial
+ Developer and every Contributor for any liability incurred by the
+ Initial Developer or such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer.
+ .
+ 3.6. Distribution of Executable Versions.
+ .
+ You may distribute Covered Code in Executable form only if the
+ requirements of Section 3.1-3.5 have been met for that Covered Code,
+ and if You include a notice stating that the Source Code version of
+ the Covered Code is available under the terms of this License,
+ including a description of how and where You have fulfilled the
+ obligations of Section 3.2. The notice must be conspicuously included
+ in any notice in an Executable version, related documentation or
+ collateral in which You describe recipients' rights relating to the
+ Covered Code. You may distribute the Executable version of Covered
+ Code or ownership rights under a license of Your choice, which may
+ contain terms different from this License, provided that You are in
+ compliance with the terms of this License and that the license for the
+ Executable version does not attempt to limit or alter the recipient's
+ rights in the Source Code version from the rights set forth in this
+ License. If You distribute the Executable version under a different
+ license You must make it absolutely clear that any terms which differ
+ from this License are offered by You alone, not by the Initial
+ Developer or any Contributor. You hereby agree to indemnify the
+ Initial Developer and every Contributor for any liability incurred by
+ the Initial Developer or such Contributor as a result of any such
+ terms You offer.
+ .
+ 3.7. Larger Works.
+ .
+ You may create a Larger Work by combining Covered Code with other code
+ not governed by the terms of this License and distribute the Larger
+ Work as a single product. In such a case, You must make sure the
+ requirements of this License are fulfilled for the Covered Code.
+ .
+ 4. Inability to Comply Due to Statute or Regulation.
+ .
+ If it is impossible for You to comply with any of the terms of this
+ License with respect to some or all of the Covered Code due to
+ statute, judicial order, or regulation then You must: (a) comply with
+ the terms of this License to the maximum extent possible; and (b)
+ describe the limitations and the code they affect. Such description
+ must be included in the LEGAL file described in Section 3.4 and must
+ be included with all distributions of the Source Code. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+ .
+ 5. Application of this License.
+ .
+ This License applies to code to which the Initial Developer has
+ attached the notice in Exhibit A and to related Covered Code.
+ .
+ 6. Versions of the License.
+ .
+ 6.1. New Versions.
+ .
+ Netscape Communications Corporation ("Netscape") may publish revised
+ and/or new versions of the License from time to time. Each version
+ will be given a distinguishing version number.
+ .
+ 6.2. Effect of New Versions.
+ .
+ Once Covered Code has been published under a particular version of the
+ License, You may always continue to use it under the terms of that
+ version. You may also choose to use such Covered Code under the terms
+ of any subsequent version of the License published by Netscape. No one
+ other than Netscape has the right to modify the terms applicable to
+ Covered Code created under this License.
+ .
+ 6.3. Derivative Works.
+ .
+ If You create or use a modified version of this License (which you may
+ only do in order to apply it to code which is not already Covered Code
+ governed by this License), You must (a) rename Your license so that
+ the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+ "MPL", "NPL" or any confusingly similar phrase do not appear in your
+ license (except to note that your license differs from this License)
+ and (b) otherwise make it clear that Your version of the license
+ contains terms which differ from the Mozilla Public License and
+ Netscape Public License. (Filling in the name of the Initial
+ Developer, Original Code or Contributor in the notice described in
+ Exhibit A shall not of themselves be deemed to be modifications of
+ this License.)
+ .
+ 7. DISCLAIMER OF WARRANTY.
+ .
+ COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+ DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+ THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+ IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+ YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+ COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+ OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+ ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+ .
+ 8. TERMINATION.
+ .
+ 8.1.  This License and the rights granted hereunder will terminate
+ automatically if You fail to comply with terms herein and fail to cure
+ such breach within 30 days of becoming aware of the breach. All
+ sublicenses to the Covered Code which are properly granted shall
+ survive any termination of this License. Provisions which, by their
+ nature, must remain in effect beyond the termination of this License
+ shall survive.
+ .
+ 8.2.  If You initiate litigation by asserting a patent infringement
+ claim (excluding declatory judgment actions) against Initial Developer
+ or a Contributor (the Initial Developer or Contributor against whom
+ You file such action is referred to as "Participant")  alleging that:
+ .
+ (a)  such Participant's Contributor Version directly or indirectly
+ infringes any patent, then any and all rights granted by such
+ Participant to You under Sections 2.1 and/or 2.2 of this License
+ shall, upon 60 days notice from Participant terminate prospectively,
+ unless if within 60 days after receipt of notice You either: (i)
+ agree in writing to pay Participant a mutually agreeable reasonable
+ royalty for Your past and future use of Modifications made by such
+ Participant, or (ii) withdraw Your litigation claim with respect to
+ the Contributor Version against such Participant.  If within 60 days
+ of notice, a reasonable royalty and payment arrangement are not
+ mutually agreed upon in writing by the parties or the litigation claim
+ is not withdrawn, the rights granted by Participant to You under
+ Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+ the 60 day notice period specified above.
+ .
+ (b)  any software, hardware, or device, other than such Participant's
+ Contributor Version, directly or indirectly infringes any patent, then
+ any rights granted to You by such Participant under Sections 2.1(b)
+ and 2.2(b) are revoked effective as of the date You first made, used,
+ sold, distributed, or had made, Modifications made by that
+ Participant.
+ .
+ 8.3.  If You assert a patent infringement claim against Participant
+ alleging that such Participant's Contributor Version directly or
+ indirectly infringes any patent where such claim is resolved (such as
+ by license or settlement) prior to the initiation of patent
+ infringement litigation, then the reasonable value of the licenses
+ granted by such Participant under Sections 2.1 or 2.2 shall be taken
+ into account in determining the amount or value of any payment or
+ license.
+ .
+ 8.4.  In the event of termination under Sections 8.1 or 8.2 above,
+ all end user license agreements (excluding distributors and resellers)
+ which have been validly granted by You or any distributor hereunder
+ prior to termination shall survive termination.
+ .
+ 9. LIMITATION OF LIABILITY.
+ .
+ UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+ (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+ DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+ OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+ ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+ CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+ WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+ COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+ INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+ LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+ RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+ PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+ EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+ THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+ .
+ 10. U.S. GOVERNMENT END USERS.
+ .
+ The Covered Code is a "commercial item," as that term is defined in
+ 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+ software" and "commercial computer software documentation," as such
+ terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+ C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+ all U.S. Government End Users acquire Covered Code with only those
+ rights set forth herein.
+ .
+ 11. MISCELLANEOUS.
+ .
+ This License represents the complete agreement concerning subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. This License shall be governed by
+ California law provisions (except to the extent applicable law, if
+ any, provides otherwise), excluding its conflict-of-law provisions.
+ With respect to disputes in which at least one party is a citizen of,
+ or an entity chartered or registered to do business in the United
+ States of America, any litigation relating to this License shall be
+ subject to the jurisdiction of the Federal Courts of the Northern
+ District of California, with venue lying in Santa Clara County,
+ California, with the losing party responsible for costs, including
+ without limitation, court costs and reasonable attorneys' fees and
+ expenses. The application of the United Nations Convention on
+ Contracts for the International Sale of Goods is expressly excluded.
+ Any law or regulation which provides that the language of a contract
+ shall be construed against the drafter shall not apply to this
+ License.
+ .
+ 12. RESPONSIBILITY FOR CLAIMS.
+ .
+ As between Initial Developer and the Contributors, each party is
+ responsible for claims and damages arising, directly or indirectly,
+ out of its utilization of rights under this License and You agree to
+ work with Initial Developer and Contributors to distribute such
+ responsibility on an equitable basis. Nothing herein is intended or
+ shall be deemed to constitute any admission of liability.
+ .
+ 13. MULTIPLE-LICENSED CODE.
+ .
+ Initial Developer may designate portions of the Covered Code as
+ "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
+ Developer permits you to utilize portions of the Covered Code under
+ Your choice of the NPL or the alternative licenses, if any, specified
+ by the Initial Developer in the file described in Exhibit A.
+ .
+ EXHIBIT A -Mozilla Public License.
+ .
+ The contents of this file are subject to the Mozilla Public License Version
+ 1.1 (the "License"); you may not use this file except in compliance with the
+ License. You may obtain a copy of the License at http://www.mozilla.org/MPL/
+ .
+ Software distributed under the License is distributed on an "AS IS" basis,
+ WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
+ the specific language governing rights and limitations under the License.
+ .
+ The Original Code is RabbitMQ.
+ .
+ The Initial Developer of the Original Code is GoPivotal, Inc. Copyright (c)
+ 2007-2014 GoPivotal, Inc. All rights reserved.
+
+License: Apache-2.0
+  On Debian GNU/Linux system you can find the complete text of the
+  Apache-2.0 license in '/usr/share/common-licenses/Apache-2.0'
diff --git a/debian/dirs b/debian/dirs
deleted file mode 100644 (file)
index 625b7d4..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-usr/lib/rabbitmq/bin
-usr/lib/erlang/lib
-usr/sbin
-usr/share/man
-var/lib/rabbitmq/mnesia
-var/log/rabbitmq
-etc/logrotate.d
-etc/rabbitmq
-
diff --git a/debian/gbp.conf b/debian/gbp.conf
new file mode 100644 (file)
index 0000000..ee339ed
--- /dev/null
@@ -0,0 +1,6 @@
+[DEFAULT]
+debian-branch = master
+pristine-tar = True
+
+[buildpackage]
+export-dir = ../build-area/
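
The gbp.conf above wires the packaging to git-buildpackage: pristine-tar is enabled and builds are exported to ../build-area/. As a hedged illustration only (assuming git-buildpackage and the usual Debian build dependencies are installed; the command is not part of this change), a build driven by that configuration would look like:

    # gbp picks up debian/gbp.conf automatically; -us -uc are passed through to dpkg-buildpackage
    gbp buildpackage -us -uc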
old mode 100644 (file)
new mode 100755 (executable)
similarity index 100%
rename from debian/rabbitmq-server.ocf
rename to debian/ocf/rabbitmq-server
diff --git a/debian/rabbitmq-env.conf b/debian/rabbitmq-env.conf
new file mode 100644 (file)
index 0000000..bebe2ab
--- /dev/null
@@ -0,0 +1,13 @@
+# Defaults to rabbit. This can be useful if you want to run more than one node
+# per machine - RABBITMQ_NODENAME should be unique per erlang-node-and-machine
+# combination. See the clustering on a single machine guide for details:
+# http://www.rabbitmq.com/clustering.html#single-machine
+#NODENAME=rabbit
+
+# By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if
+# available. Set this if you only want to bind to one network interface or
+# address family.
+#NODE_IP_ADDRESS=127.0.0.1
+
+# Defaults to 5672.
+#NODE_PORT=5672
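
The comments in rabbitmq-env.conf above note that RABBITMQ_NODENAME must be unique per Erlang-node-and-machine combination. As an illustrative sketch only (node name and port are example values, not defaults introduced by this change), a second node on the same machine could be started with:

    # hypothetical second node alongside the default one
    RABBITMQ_NODENAME=rabbit2 RABBITMQ_NODE_PORT=5673 rabbitmq-server -detached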
old mode 100644 (file)
new mode 100755 (executable)
index 4fecc2e..a622ae2
@@ -32,6 +32,9 @@ SCRIPT=`basename $0`
 if [ `id -u` = `id -u rabbitmq` -a "$SCRIPT" = "rabbitmq-server" ] ; then
     /usr/lib/rabbitmq/bin/rabbitmq-server "$@"  > "/var/log/rabbitmq/startup_log" 2> "/var/log/rabbitmq/startup_err"
 elif [ `id -u` = `id -u rabbitmq` -o "$SCRIPT" = "rabbitmq-plugins" ] ; then
+    if [ -f $PWD/.erlang.cookie ] ; then
+        export HOME=.
+    fi
     /usr/lib/rabbitmq/bin/${SCRIPT} "$@"
 elif [ `id -u` = 0 ] ; then
     su rabbitmq -s /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}"
diff --git a/debian/rabbitmq-server-wait b/debian/rabbitmq-server-wait
new file mode 100755 (executable)
index 0000000..cdf53e5
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/sh -e
+##  The contents of this file are subject to the Mozilla Public License
+##  Version 1.1 (the "License"); you may not use this file except in
+##  compliance with the License. You may obtain a copy of the License
+##  at http://www.mozilla.org/MPL/
+##
+##  Software distributed under the License is distributed on an "AS IS"
+##  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+##  the License for the specific language governing rights and
+##  limitations under the License.
+##
+##  The Original Code is RabbitMQ.
+##
+##  The Initial Developer of the Original Code is GoPivotal, Inc.
+##  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+##
+
+# Get default settings with user overrides for (RABBITMQ_)<var_name>
+# Non-empty defaults should be set in rabbitmq-env
+. `dirname $0`/rabbitmq-env
+
+/usr/lib/rabbitmq/bin/rabbitmqctl wait $RABBITMQ_PID_FILE
index 1efb35670ba1b7906ccf100d2a593e8924f12f95..bde5e308952bfed0aabb86bf525e20d1d2763b7c 100644 (file)
@@ -6,6 +6,4 @@
 # to handle many simultaneous connections. Refer to the system
 # documentation for ulimit (in man bash) for more information.
 #
-ulimit -H -n 105472
-ulimit -S -n 102400
-
+#ulimit -n 1024
diff --git a/debian/rabbitmq-server.dirs b/debian/rabbitmq-server.dirs
new file mode 100644 (file)
index 0000000..e6127a0
--- /dev/null
@@ -0,0 +1,3 @@
+usr/lib/erlang/lib
+var/lib/rabbitmq/mnesia
+var/log/rabbitmq
diff --git a/debian/rabbitmq-server.install b/debian/rabbitmq-server.install
new file mode 100644 (file)
index 0000000..902f3dd
--- /dev/null
@@ -0,0 +1,4 @@
+debian/ocf/rabbitmq-server /usr/lib/ocf/resource.d/rabbitmq/
+debian/rabbitmq-server-wait /usr/lib/rabbitmq/bin
+debian/rabbitmq-script-wrapper /usr/lib/rabbitmq/bin
+debian/rabbitmq-env.conf       /etc/rabbitmq
diff --git a/debian/rabbitmq-server.links b/debian/rabbitmq-server.links
new file mode 100644 (file)
index 0000000..0bfa1c5
--- /dev/null
@@ -0,0 +1,3 @@
+/usr/lib/rabbitmq/bin/rabbitmq-script-wrapper /usr/sbin/rabbitmqctl
+/usr/lib/rabbitmq/bin/rabbitmq-script-wrapper /usr/sbin/rabbitmq-server
+/usr/lib/rabbitmq/bin/rabbitmq-script-wrapper /usr/sbin/rabbitmq-plugins
diff --git a/debian/rabbitmq-server.service b/debian/rabbitmq-server.service
new file mode 100644 (file)
index 0000000..faa73c1
--- /dev/null
@@ -0,0 +1,15 @@
+[Unit]
+Description=RabbitMQ Messaging Server
+After=network.target
+
+[Service]
+Type=simple
+User=rabbitmq
+SyslogIdentifier=rabbitmq
+LimitNOFILE=65536
+ExecStart=/usr/sbin/rabbitmq-server
+ExecStartPost=/usr/lib/rabbitmq/bin/rabbitmq-server-wait
+ExecStop=/usr/sbin/rabbitmqctl stop
+
+[Install]
+WantedBy=multi-user.target
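
A hedged usage sketch for the unit file above (not part of the diff; assumes a systemd-based host). Because rabbitmq-server-wait runs as ExecStartPost, "systemctl start" should only return once the broker has actually come up:

    sudo systemctl daemon-reload           # pick up the new unit file
    sudo systemctl enable rabbitmq-server  # start on boot
    sudo systemctl start rabbitmq-server   # blocks until rabbitmq-server-wait succeeds
    systemctl status rabbitmq-server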
old mode 100644 (file)
new mode 100755 (executable)
index cac29c8..489d7df
@@ -1,27 +1,21 @@
 #!/usr/bin/make -f
+# -*- makefile -*-
+#export DH_VERBOSE=1
 
-include /usr/share/cdbs/1/rules/debhelper.mk
-include /usr/share/cdbs/1/class/makefile.mk
+%:
+       dh $@ --parallel --with systemd
 
-RABBIT_LIB=$(DEB_DESTDIR)usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)/
-RABBIT_BIN=$(DEB_DESTDIR)usr/lib/rabbitmq/bin/
+DEB_UPSTREAM_VERSION=$(shell dpkg-parsechangelog | sed -rne 's,^Version: ([^+]+)-.*,\1,p')
+DEB_DESTDIR=debian/rabbitmq-server
+RABBIT_LIB=$(DEB_DESTDIR)/usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)
+RABBIT_BIN=$(DEB_DESTDIR)/usr/lib/rabbitmq/bin
+DOCDIR=$(DEB_DESTDIR)/usr/share/doc/rabbitmq-server
 
-DOCDIR=$(DEB_DESTDIR)usr/share/doc/rabbitmq-server/
-DEB_MAKE_INSTALL_TARGET := install TARGET_DIR=$(RABBIT_LIB) SBIN_DIR=$(RABBIT_BIN) DOC_INSTALL_DIR=$(DOCDIR) MAN_DIR=$(DEB_DESTDIR)usr/share/man/
-DEB_MAKE_CLEAN_TARGET:= distclean
-DEB_INSTALL_DOCS_ALL=debian/README
+override_dh_auto_install:
+       dh_auto_install -- TARGET_DIR=$(RABBIT_LIB) SBIN_DIR=$(RABBIT_BIN) \
+               DOC_INSTALL_DIR=$(DOCDIR) MAN_DIR=$(DEB_DESTDIR)/usr/share/man
+       rm -f $(RABBIT_LIB)/LICENSE* $(RABBIT_LIB)/INSTALL*
 
-DEB_DH_INSTALLINIT_ARGS="--no-start"
-
-install/rabbitmq-server::
-       mkdir -p $(DOCDIR)
-       rm $(RABBIT_LIB)LICENSE* $(RABBIT_LIB)INSTALL*
-       for script in rabbitmqctl rabbitmq-server rabbitmq-plugins; do \
-               install -p -D -m 0755 debian/rabbitmq-script-wrapper $(DEB_DESTDIR)usr/sbin/$$script; \
-       done
-       sed -e 's|@RABBIT_LIB@|/usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)|g' <debian/postrm.in >debian/postrm
-       install -p -D -m 0755 debian/rabbitmq-server.ocf $(DEB_DESTDIR)usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server
-       install -p -D -m 0644 debian/rabbitmq-server.default $(DEB_DESTDIR)etc/default/rabbitmq-server
-
-clean::
-       rm -f plugins-src/rabbitmq-server debian/postrm plugins/README
+override_dh_auto_clean:
+       rm -f plugins-src/rabbitmq-server plugins/README
+       dh_auto_clean
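
The rewritten debian/rules above is a minimal dh(1) override file replacing the old CDBS includes. A sketch of the corresponding build step, assuming debhelper and dh-systemd are installed (illustrative, not part of the change):

    # run from the top of the unpacked source tree
    dpkg-buildpackage -us -uc -b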
index b41aff9aedc1003866091af62a3d65c914776e34..e41153d6127aea21226109334dc2289673df1b2d 100644 (file)
@@ -1,4 +1,2 @@
 version=3
-
-http://www.rabbitmq.com/releases/rabbitmq-server/v(.*)/rabbitmq-server-(\d.*)\.tar\.gz \
-       debian uupdate
+http://www.rabbitmq.com/releases/rabbitmq-server/v(.*)/rabbitmq-server-(\d.*)\.tar\.gz
index 58c43e718da42d1b63ce3af5049a60890ca7f7d5..9deeb23c0dcfa4e26223ed4ff3c907de8e98c695 100644 (file)
@@ -96,7 +96,7 @@ The rest of this package is licensed under the Mozilla Public License 1.1
 Authors and Copyright are as described below:
 
      The Initial Developer of the Original Code is GoPivotal, Inc.
-     Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+     Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 
 
                           MOZILLA PUBLIC LICENSE
@@ -548,7 +548,7 @@ EXHIBIT A -Mozilla Public License.
      The Original Code is RabbitMQ.
 
      The Initial Developer of the Original Code is GoPivotal, Inc.
-     Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.''
+     Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.''
 
      [NOTE: The text of this Exhibit A may differ slightly from the text of
      the notices in the Source Code files of the Original Code. You should
index c54b44e577166f822b6e491b320ef920e716babf..1c9800188e4478723fdad5d409f253e5e1e58286 100644 (file)
@@ -8,24 +8,30 @@ RABBITMQ_LOG_BASE ?= $(TMPDIR)
 
 DEPS_FILE=deps.mk
 SOURCE_DIR=src
+TEST_DIR=test/src
 EBIN_DIR=ebin
+TEST_EBIN_DIR=test/ebin
 INCLUDE_DIR=include
 DOCS_DIR=docs
 INCLUDES=$(wildcard $(INCLUDE_DIR)/*.hrl) $(INCLUDE_DIR)/rabbit_framing.hrl
 SOURCES=$(wildcard $(SOURCE_DIR)/*.erl) $(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl $(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl $(USAGES_ERL)
+TEST_SOURCES=$(wildcard $(TEST_DIR)/*.erl)
 BEAM_TARGETS=$(patsubst $(SOURCE_DIR)/%.erl, $(EBIN_DIR)/%.beam, $(SOURCES))
+TEST_BEAM_TARGETS=$(patsubst $(TEST_DIR)/%.erl, $(TEST_EBIN_DIR)/%.beam, $(TEST_SOURCES))
 TARGETS=$(EBIN_DIR)/rabbit.app $(INCLUDE_DIR)/rabbit_framing.hrl $(BEAM_TARGETS) plugins
+TEST_TARGETS=$(TEST_BEAM_TARGETS)
 WEB_URL=http://www.rabbitmq.com/
 MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml))
 WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml $(DOCS_DIR)/rabbitmq-echopid.xml)
 USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml $(DOCS_DIR)/rabbitmq-plugins.1.xml
 USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML)))
-QC_MODULES := rabbit_backing_queue_qc
-QC_TRIALS ?= 100
 
 ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes)
 PYTHON=python
 else
+ifeq ($(shell python2.7 -c 'import json' 2>/dev/null && echo yes),yes)
+PYTHON=python2.7
+else
 ifeq ($(shell python2.6 -c 'import simplejson' 2>/dev/null && echo yes),yes)
 PYTHON=python2.6
 else
@@ -37,14 +43,15 @@ PYTHON=python
 endif
 endif
 endif
+endif
 
 BASIC_PLT=basic.plt
 RABBIT_PLT=rabbit.plt
 
 ifndef USE_SPECS
-# our type specs rely on callback specs, which are available in R15B
+# our type specs rely on dict:dict/0 etc, which are only available in 17.0
 # upwards.
-USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,9]), halt().')
+USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,11]), halt().')
 endif
 
 ifndef USE_PROPER_QC
@@ -54,7 +61,13 @@ USE_PROPER_QC:=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:e
 endif
 
 #other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests
-ERLC_OPTS=-I $(INCLUDE_DIR) -o $(EBIN_DIR) -Wall -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc)
+ERLC_OPTS=-I $(INCLUDE_DIR) -Wall -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc)
+
+ifdef INSTRUMENT_FOR_QC
+ERLC_OPTS += -DINSTR_MOD=gm_qc
+else
+ERLC_OPTS += -DINSTR_MOD=gm
+endif
 
 include version.mk
 
@@ -102,7 +115,7 @@ ifneq "$(DEFAULT_GOAL_MAKE)" "$(firstword $(sort $(DEFAULT_GOAL_MAKE) $(MAKE_VER
 .DEFAULT_GOAL=all
 endif
 
-all: $(TARGETS)
+all: $(TARGETS) $(TEST_TARGETS)
 
 .PHONY: plugins check-xref
 ifneq "$(PLUGINS_SRC_DIR)" ""
@@ -123,7 +136,7 @@ plugins:
 # Not building plugins
 
 check-xref:
-       $(info xref checks are disabled)
+       $(info xref checks are disabled as there is no plugins-src directory)
 
 endif
 
@@ -135,7 +148,13 @@ $(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(SOURCES) generate_app
        escript generate_app $< $@ $(SOURCE_DIR)
 
 $(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE)
-       erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $<
+       erlc -o $(EBIN_DIR) $(ERLC_OPTS) -pa $(EBIN_DIR) $<
+
+$(TEST_EBIN_DIR)/%.beam: $(TEST_DIR)/%.erl | $(TEST_EBIN_DIR)
+       erlc -o $(TEST_EBIN_DIR) $(ERLC_OPTS) -pa $(EBIN_DIR) -pa $(TEST_EBIN_DIR) $<
+
+$(TEST_EBIN_DIR):
+       mkdir -p $(TEST_EBIN_DIR)
 
 $(INCLUDE_DIR)/rabbit_framing.hrl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8)
        $(PYTHON) codegen.py --ignore-conflicts header $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) $@
@@ -169,6 +188,7 @@ $(BASIC_PLT): $(BEAM_TARGETS)
 clean:
        rm -f $(EBIN_DIR)/*.beam
        rm -f $(EBIN_DIR)/rabbit.app $(EBIN_DIR)/rabbit.boot $(EBIN_DIR)/rabbit.script $(EBIN_DIR)/rabbit.rel
+       rm -rf $(TEST_EBIN_DIR)
        rm -f $(PLUGINS_DIR)/*.ez
        [ -d "$(PLUGINS_SRC_DIR)" ] && PLUGINS_SRC_DIR="" PRESERVE_CLONE_DIR=1 make -C $(PLUGINS_SRC_DIR) clean || true
        rm -f $(INCLUDE_DIR)/rabbit_framing.hrl $(SOURCE_DIR)/rabbit_framing_amqp_*.erl codegen.pyc
@@ -210,19 +230,28 @@ run-background-node: all
        $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
                RABBITMQ_NODE_ONLY=true \
                RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
-               ./scripts/rabbitmq-server
+               ./scripts/rabbitmq-server -detached
 
 run-tests: all
+       echo 'code:add_path("$(TEST_EBIN_DIR)").' | $(ERL_CALL)
+       echo 'code:add_path("$(TEST_EBIN_DIR)").' | $(ERL_CALL) -n hare || true
        OUT=$$(echo "rabbit_tests:all_tests()." | $(ERL_CALL)) ; \
          echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null
 
 run-qc: all
-       $(foreach MOD,$(QC_MODULES),./quickcheck $(RABBITMQ_NODENAME) $(MOD) $(QC_TRIALS))
+       echo 'code:add_path("$(TEST_EBIN_DIR)").' | $(ERL_CALL)
+       ./quickcheck $(RABBITMQ_NODENAME) rabbit_backing_queue_qc 100 40
+       ./quickcheck $(RABBITMQ_NODENAME) gm_qc 1000 200
 
 start-background-node: all
        -rm -f $(RABBITMQ_MNESIA_DIR).pid
        mkdir -p $(RABBITMQ_MNESIA_DIR)
-       nohup sh -c "$(MAKE) run-background-node > $(RABBITMQ_MNESIA_DIR)/startup_log 2> $(RABBITMQ_MNESIA_DIR)/startup_err" > /dev/null &
+       $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
+               RABBITMQ_NODE_ONLY=true \
+               RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
+               ./scripts/rabbitmq-server \
+               > $(RABBITMQ_MNESIA_DIR)/startup_log \
+               2> $(RABBITMQ_MNESIA_DIR)/startup_err &
        ./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_MNESIA_DIR).pid kernel
 
 start-rabbit-on-node: all
@@ -346,19 +375,22 @@ install_docs: docs_all install_dirs
                        cp $$manpage $(MAN_DIR)/man$$section; \
                done; \
        done
-       cp $(DOCS_DIR)/rabbitmq.config.example $(DOC_INSTALL_DIR)/rabbitmq.config.example
+       if test "$(DOC_INSTALL_DIR)"; then \
+               cp $(DOCS_DIR)/rabbitmq.config.example $(DOC_INSTALL_DIR)/rabbitmq.config.example; \
+       fi
 
 install_dirs:
        @ OK=true && \
          { [ -n "$(TARGET_DIR)" ] || { echo "Please set TARGET_DIR."; OK=false; }; } && \
          { [ -n "$(SBIN_DIR)" ] || { echo "Please set SBIN_DIR."; OK=false; }; } && \
-         { [ -n "$(MAN_DIR)" ] || { echo "Please set MAN_DIR."; OK=false; }; } && \
-         { [ -n "$(DOC_INSTALL_DIR)" ] || { echo "Please set DOC_INSTALL_DIR."; OK=false; }; } && $$OK
+         { [ -n "$(MAN_DIR)" ] || { echo "Please set MAN_DIR."; OK=false; }; } && $$OK
 
        mkdir -p $(TARGET_DIR)/sbin
        mkdir -p $(SBIN_DIR)
        mkdir -p $(MAN_DIR)
-       mkdir -p $(DOC_INSTALL_DIR)
+       if test "$(DOC_INSTALL_DIR)"; then \
+               mkdir -p $(DOC_INSTALL_DIR); \
+       fi
 
 $(foreach XML,$(USAGES_XML),$(eval $(call usage_dep, $(XML))))
 
index 90e99e622a6ccc993cad3955e123e3fcbb4a8446..67e3a66ad63da40deaee519225d7725273fda427 100644 (file)
@@ -1 +1 @@
-Please see http://www.rabbitmq.com/build-server.html for build instructions.
\ No newline at end of file
+Please see http://www.rabbitmq.com/build-server.html for build instructions.
index b2356bbcc9ab2a9b7e7a82a0c1c1942a28753089..9f16b32425eedda1e95d451c577c3217330faae1 100644 (file)
@@ -11,7 +11,7 @@
 ##  The Original Code is RabbitMQ.
 ##
 ##  The Initial Developer of the Original Code is GoPivotal, Inc.
-##  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+##  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 ##
 
 from __future__ import nested_scopes
@@ -106,7 +106,7 @@ def printFileHeader():
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%"""
 
 def genErl(spec):
diff --git a/rabbitmq-server/codegen/CONTRIBUTING.md b/rabbitmq-server/codegen/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
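
The contribution workflow above reduces to a few shell steps. A sketch, under the assumption that "make co" checks the component repositories (for example rabbitmq-server) out under the umbrella directory; branch and repository names are placeholders:

    git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git umbrella
    cd umbrella && make co          # fetch the individual component repositories
    cd rabbitmq-server              # work inside the repository you are changing
    git checkout -b descriptive-branch-name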
index 8ecb4fc83ce823691c2d778b253c10645218e280..f7be2d29953d4b554fb018e732bee88e8ad12d26 100644 (file)
@@ -40,6 +40,7 @@
   <refsynopsisdiv>
     <cmdsynopsis>
       <command>rabbitmq-plugins</command>
+      <arg choice="opt">-n <replaceable>node</replaceable></arg>
       <arg choice="req"><replaceable>command</replaceable></arg>
       <arg choice="opt" rep="repeat"><replaceable>command options</replaceable></arg>
     </cmdsynopsis>
       enabled. Implicitly enabled plugins are automatically disabled again
       when they are no longer required.
     </para>
+
+    <para>
+      The <command>enable</command>, <command>disable</command> and
+      <command>set</command> commands will update the plugins file and
+      then attempt to connect to the broker and ensure it is running
+      all enabled plugins. By default if it is not possible to connect
+      to the running broker (for example if it is stopped) then a
+      warning is displayed. Specify <command>--online</command> or
+      <command>--offline</command> to change this behaviour.
+    </para>
   </refsect1>
 
   <refsect1>
           </variablelist>
           <para>
             Lists all plugins, their versions, dependencies and
-            descriptions. Each plugin is prefixed with a status
-            indicator - [ ] to indicate that the plugin is not
-            enabled, [E] to indicate that it is explicitly enabled,
-            [e] to indicate that it is implicitly enabled, and [!] to
-            indicate that it is enabled but missing and thus not
-            operational.
+            descriptions. Each plugin is prefixed with two status
+            indicator characters inside [ ]. The first indicator can
+            be " " to indicate that the plugin is not enabled, "E" to
+            indicate that it is explicitly enabled, "e" to indicate
+            that it is implicitly enabled, or "!" to indicate that it
+            is enabled but missing and thus not operational. The
+            second indicator can be " " to show that the plugin is not
+            running, or "*" to show that it is.
           </para>
           <para>
             If the optional pattern is given, only plugins whose
       </varlistentry>
 
       <varlistentry>
-        <term><cmdsynopsis><command>enable</command> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
+        <term><cmdsynopsis><command>enable</command> <arg choice="opt">--offline</arg> <arg choice="opt">--online</arg> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
         <listitem>
           <variablelist>
+            <varlistentry>
+              <term>--offline</term>
+              <listitem><para>Just modify the enabled plugins file.</para></listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>--online</term>
+              <listitem><para>Treat failure to connect to the running broker as fatal.</para></listitem>
+            </varlistentry>
             <varlistentry>
               <term>plugin</term>
               <listitem><para>One or more plugins to enable.</para></listitem>
             </varlistentry>
           </variablelist>
           <para>
-            Enables the specified plugins and all their
-            dependencies.
+            Enables the specified plugins and all their dependencies.
           </para>
 
           <para role="example-prefix">For example:</para>
       </varlistentry>
 
       <varlistentry>
-        <term><cmdsynopsis><command>disable</command> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
+        <term><cmdsynopsis><command>disable</command> <arg choice="opt">--offline</arg> <arg choice="opt">--online</arg> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
         <listitem>
           <variablelist>
+            <varlistentry>
+              <term>--offline</term>
+              <listitem><para>Just modify the enabled plugins file.</para></listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>--online</term>
+              <listitem><para>Treat failure to connect to the running broker as fatal.</para></listitem>
+            </varlistentry>
             <varlistentry>
               <term>plugin</term>
               <listitem><para>One or more plugins to disable.</para></listitem>
             </varlistentry>
           </variablelist>
           <para>
-            Disables the specified plugins and all plugins that
-            depend on them.
+            Disables the specified plugins and all their dependencies.
           </para>
 
           <para role="example-prefix">For example:</para>
           </para>
         </listitem>
       </varlistentry>
+
+      <varlistentry>
+        <term><cmdsynopsis><command>set</command> <arg choice="opt">--offline</arg> <arg choice="opt">--online</arg> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
+        <listitem>
+          <variablelist>
+            <varlistentry>
+              <term>--offline</term>
+              <listitem><para>Just modify the enabled plugins file.</para></listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>--online</term>
+              <listitem><para>Treat failure to connect to the running broker as fatal.</para></listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>plugin</term>
+              <listitem><para>Zero or more plugins to enable.</para></listitem>
+            </varlistentry>
+          </variablelist>
+          <para>
+            Enables the specified plugins and all their
+            dependencies. Unlike <command>rabbitmq-plugins
+            enable</command> this command ignores and overwrites any
+            existing enabled plugins. <command>rabbitmq-plugins
+            set</command> with no plugin arguments is a legal command
+            meaning "disable all plugins".
+          </para>
+
+          <para role="example-prefix">For example:</para>
+          <screen role="example">rabbitmq-plugins set rabbitmq_management</screen>
+          <para role="example">
+            This command enables the <command>management</command>
+            plugin and its dependencies and disables everything else.
+          </para>
+        </listitem>
+      </varlistentry>
+
     </variablelist>
 
   </refsect1>
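
To make the plugin-management semantics documented above concrete, a few hedged examples (rabbitmq_management and rabbitmq_mqtt are plugin names that appear elsewhere in this change; the exact invocations are illustrative):

    rabbitmq-plugins set rabbitmq_management         # enable only this plugin and its dependencies
    rabbitmq-plugins enable --offline rabbitmq_mqtt  # edit the enabled-plugins file without contacting the broker
    rabbitmq-plugins list                            # [E*] = explicitly enabled and running, [e*] = implicitly enabled and running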
index a4bd15808740dee0a7549a779110a382acdbbade..3368960b80f6dec4736a3c6a473927485817048c 100644 (file)
@@ -66,8 +66,7 @@ Display usage information.
             <para>
 Install the service. The service will not be started.
 Subsequent invocations will update the service parameters if
-relevant environment variables were modified or if the active
-plugins were changed.
+relevant environment variables were modified.
             </para>
           </listitem>
         </varlistentry>
index a128bfbc54f10be40ab526a5c765782dd54f2019..9b3855cd86930ffa8e870c548db9db90402c0881 100644 (file)
    %%
    %% {ssl_listeners, [5671]},
 
+   %% Maximum time for AMQP 0-8/0-9/0-9-1 handshake (after socket connection
+   %% and SSL handshake), in milliseconds.
+   %%
+   %% {handshake_timeout, 10000},
+
    %% Log levels (currently just used for connection logging).
-   %% One of 'info', 'warning', 'error' or 'none', in decreasing order
-   %% of verbosity. Defaults to 'info'.
+   %% One of 'debug', 'info', 'warning', 'error' or 'none', in decreasing
+   %% order of verbosity. Defaults to 'info'.
    %%
-   %% {log_levels, [{connection, info}]},
+   %% {log_levels, [{connection, info}, {channel, info}]},
 
    %% Set to 'true' to perform reverse DNS lookups when accepting a
    %% connection. Hostnames will then be shown instead of IP addresses
 
    %% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
    %% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
-   %% configuration section later in this fail and the README in
+   %% configuration section later in this file and the README in
    %% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
    %% details.
    %%
    %%
    %% {ssl_cert_login_from, common_name},
 
+   %% SSL handshake timeout, in milliseconds.
+   %%
+   %% {ssl_handshake_timeout, 5000},
+
    %%
    %% Default User / VHost
    %% ====================
    %%
    %% {cluster_nodes, {['rabbit@my.host.com'], disc}},
 
+   %% Interval (in milliseconds) at which we send keepalive messages
+   %% to other cluster members. Note that this is not the same thing
+   %% as net_ticktime; missed keepalive messages will not cause nodes
+   %% to be considered down.
+   %%
+   %% {cluster_keepalive_interval, 10000},
+
    %% Set (internal) statistics collection granularity.
    %%
    %% {collect_statistics, none},
 
    %% Explicitly enable/disable hipe compilation.
    %%
-   %% {hipe_compile, true}
+   %% {hipe_compile, true},
+
+   %% Timeout used when waiting for Mnesia tables in a cluster to
+   %% become available.
+   %%
+   %% {mnesia_table_loading_timeout, 30000},
+
+   %% Size in bytes below which to embed messages in the queue index. See
+   %% http://www.rabbitmq.com/persistence-conf.html
+   %%
+   %% {queue_index_embed_msgs_below, 4096}
 
   ]},
 
    %%                         {certfile,   "/path/to/cert.pem"},
    %%                         {keyfile,    "/path/to/key.pem"}]}]},
 
+   %% One of 'basic', 'detailed' or 'none'. See
+   %% http://www.rabbitmq.com/management.html#fine-stats for more details.
+   %% {rates_mode, basic},
+
    %% Configure how long aggregated data (such as message rates and queue
    %% lengths) is retained. Please read the plugin's documentation in
-   %% https://www.rabbitmq.com/management.html#configuration for more
+   %% http://www.rabbitmq.com/management.html#configuration for more
    %% details.
    %%
    %% {sample_retention_policies,
    %%   {detailed, [{10, 5}]}]}
   ]},
 
- {rabbitmq_management_agent,
-  [%% Misc/Advanced Options
-   %%
-   %% NB: Change these only if you understand what you are doing!
-   %%
-   %% {force_fine_statistics, true}
-  ]},
-
  %% ----------------------------------------------------------------------------
  %% RabbitMQ Shovel Plugin
  %%
  %% ----------------------------------------------------------------------------
  %% RabbitMQ MQTT Adapter
  %%
- %% See http://hg.rabbitmq.com/rabbitmq-mqtt/file/stable/README.md for details
+ %% See https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md
+ %% for details
  %% ----------------------------------------------------------------------------
 
  {rabbitmq_mqtt,
  %% ----------------------------------------------------------------------------
  %% RabbitMQ AMQP 1.0 Support
  %%
- %% See http://hg.rabbitmq.com/rabbitmq-amqp1.0/file/default/README.md
+ %% See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md
  %% for details
  %% ----------------------------------------------------------------------------
 
index 01b024a2354cf0b3891b8288c66fc2b2da041f6d..92d48466383d6c0135a8a39b8847c665b6b46453 100644 (file)
@@ -41,6 +41,7 @@
     <cmdsynopsis>
       <command>rabbitmqctl</command>
       <arg choice="opt">-n <replaceable>node</replaceable></arg>
+      <arg choice="opt">-t <replaceable>timeout</replaceable></arg>
       <arg choice="opt">-q</arg>
       <arg choice="req"><replaceable>command</replaceable></arg>
       <arg choice="opt" rep="repeat"><replaceable>command options</replaceable></arg>
           </para>
         </listitem>
       </varlistentry>
+      <varlistentry>
+        <term><cmdsynopsis><arg choice="opt">-t <replaceable>timeout</replaceable></arg></cmdsynopsis></term>
+        <listitem>
+          <para role="usage">
+            Operation timeout in seconds. Only applicable to "list" commands.
+            Default is "infinity".
+          </para>
+        </listitem>
+      </varlistentry>
     </variablelist>
   </refsect1>
 
               online, except when using the <command>--offline</command> flag.
             </para>
             <para>
-              When using the <command>--offline</command> flag the node you
-              connect to will become the canonical source for cluster metadata
-              (e.g. which queues exist), even if it was not before. Therefore
-              you should use this command on the latest node to shut down if
-              at all possible.
+              When using the <command>--offline</command> flag
+              rabbitmqctl will not attempt to connect to a node as
+              normal; instead it will temporarily become the node in
+              order to make the change. This is useful if the node
+              cannot be started normally. In this case the node will
+              become the canonical source for cluster metadata
+              (e.g. which queues exist), even if it was not
+              before. Therefore you should use this command on the
+              latest node to shut down if at all possible.
             </para>
             <para role="example-prefix">For example:</para>
             <screen role="example">rabbitmqctl -n hare@mcnulty forget_cluster_node rabbit@stringer</screen>
             </para>
           </listitem>
         </varlistentry>
+        <varlistentry>
+          <term><cmdsynopsis><command>rename_cluster_node</command> <arg choice="req">oldnode1</arg> <arg choice="req">newnode1</arg> <arg choice="opt">oldnode2</arg> <arg choice="opt">newnode2 ...</arg></cmdsynopsis></term>
+          <listitem>
+            <para>
+              Supports renaming of cluster nodes in the local database.
+            </para>
+            <para>
+              This subcommand causes rabbitmqctl to temporarily become
+              the node in order to make the change. The local cluster
+              node must therefore be completely stopped; other nodes
+              can be online or offline.
+            </para>
+            <para>
+              This subcommand takes an even number of arguments, in
+              pairs representing the old and new names for nodes. You
+              must specify the old and new names for this node and for
+              any other nodes that are stopped and being renamed at
+              the same time.
+            </para>
+            <para>
+              It is possible to stop all nodes and rename them all
+              simultaneously (in which case old and new names for all
+              nodes must be given to every node) or stop and rename
+              nodes one at a time (in which case each node only needs
+              to be told how its own name is changing).
+            </para>
+            <para role="example-prefix">For example:</para>
+            <screen role="example">rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia</screen>
+            <para role="example">
+              This command will rename the node
+              <command>rabbit@misshelpful</command> to the node
+              <command>rabbit@cordelia</command>.
+            </para>
+          </listitem>
+        </varlistentry>
         <varlistentry>
           <term><cmdsynopsis><command>update_cluster_nodes</command> <arg choice="req">clusternode</arg></cmdsynopsis>
           </term>
             </para>
           </listitem>
         </varlistentry>
+        <varlistentry>
+          <term><cmdsynopsis><command>force_boot</command></cmdsynopsis></term>
+          <listitem>
+            <para>
+              Ensure that the node will start next time, even if it
+              was not the last to shut down.
+            </para>
+            <para>
+              Normally when you shut down a RabbitMQ cluster
+              altogether, the first node you restart should be the
+              last one to go down, since it may have seen things
+              happen that other nodes did not. But sometimes
+              that's not possible: for instance if the entire cluster
+              loses power then all nodes may think they were not the
+              last to shut down.
+            </para>
+            <para>
+              In such a case you can invoke <command>rabbitmqctl
+              force_boot</command> while the node is down. This will
+              tell the node to unconditionally start next time you ask
+              it to. If any changes happened to the cluster after this
+              node shut down, they will be lost.
+            </para>
+            <para>
+              If the last node to go down is permanently lost then you
+              should use <command>rabbitmqctl forget_cluster_node
+              --offline</command> in preference to this command, as it
+              will ensure that mirrored queues which were mastered on
+              the lost node get promoted.
+            </para>
+            <para role="example-prefix">For example:</para>
+            <screen role="example">rabbitmqctl force_boot</screen>
+            <para role="example">
+              This will force the node not to wait for other nodes
+              next time it is started.
+            </para>
+          </listitem>
+        </varlistentry>
         <varlistentry>
           <term><cmdsynopsis><command>sync_queue</command> <arg choice="req">queue</arg></cmdsynopsis>
           </term>
             </para>
           </listitem>
         </varlistentry>
+        <varlistentry>
+          <term><cmdsynopsis><command>purge_queue</command> <arg choice="req">queue</arg></cmdsynopsis>
+          </term>
+          <listitem>
+            <variablelist>
+              <varlistentry>
+                <term>queue</term>
+                <listitem>
+                  <para>
+                    The name of the queue to purge.
+                  </para>
+                </listitem>
+              </varlistentry>
+            </variablelist>
+            <para>
+              Purges a queue (removes all messages in it).
+            </para>
+          </listitem>
+        </varlistentry>
         <varlistentry>
           <term><cmdsynopsis><command>set_cluster_name</command> <arg choice="req">name</arg></cmdsynopsis></term>
           <listitem>
                 <listitem><para>Sum of ready and unacknowledged messages
                   (queue depth).</para></listitem>
               </varlistentry>
+              <varlistentry>
+                <term>messages_ready_ram</term>
+                <listitem><para>Number of messages from messages_ready which are resident in ram.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>messages_unacknowledged_ram</term>
+                <listitem><para>Number of messages from messages_unacknowledged which are resident in ram.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>messages_ram</term>
+                <listitem><para>Total number of messages which are resident in ram.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>messages_persistent</term>
+                <listitem><para>Total number of persistent messages in the queue (will always be 0 for transient queues).</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>message_bytes</term>
+                <listitem><para>Sum of the size of all message bodies in the queue. This does not include the message properties (including headers) or any overhead.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>message_bytes_ready</term>
+                <listitem><para>Like <command>message_bytes</command> but counting only those messages ready to be delivered to clients.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>message_bytes_unacknowledged</term>
+                <listitem><para>Like <command>message_bytes</command> but counting only those messages delivered to clients but not yet acknowledged.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>message_bytes_ram</term>
+                <listitem><para>Like <command>message_bytes</command> but counting only those messages which are in RAM.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>message_bytes_persistent</term>
+                <listitem><para>Like <command>message_bytes</command> but counting only those messages which are persistent.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>disk_reads</term>
+                <listitem><para>Total number of times messages have been read from disk by this queue since it started.</para></listitem>
+              </varlistentry>
+              <varlistentry>
+                <term>disk_writes</term>
+                <listitem><para>Total number of times messages have been written to disk by this queue since it started.</para></listitem>
+              </varlistentry>
               <varlistentry>
                 <term>consumers</term>
                 <listitem><para>Number of consumers.</para></listitem>
                 message loss.</para></listitem>
               </varlistentry>
               <varlistentry>
-                <term>status</term>
-                <listitem><para>The status of the queue. Normally
-                'running', but may be "{syncing, MsgCount}" if the queue is
-                synchronising.</para></listitem>
+                <term>state</term>
+                <listitem><para>The state of the queue. Normally
+                'running', but may be "{syncing, MsgCount}" if the
+                queue is synchronising. Queues which are located on
+                cluster nodes that are currently down will be shown
+                with a status of 'down' (and most other
+                <command>queueinfoitem</command>s will be
+                unavailable).</para></listitem>
               </varlistentry>
             </variablelist>
             <para>
                 <term>send_pend</term>
                 <listitem><para>Send queue size.</para></listitem>
               </varlistentry>
+              <varlistentry>
+                <term>connected_at</term>
+                <listitem><para>Date and time this connection was established, as timestamp.</para></listitem>
+              </varlistentry>
             </variablelist>
             <para>
               If no <command>connectioninfoitem</command>s are
           <listitem>
             <para>
               Display the name and value of each variable in the
-              application environment.
+              application environment for each running application.
             </para>
           </listitem>
         </varlistentry>
               </varlistentry>
             </variablelist>
             <para>
-              Starts tracing.
+              Starts tracing. Note that the trace state is not
+              persistent; it will revert to being off if the server is
+              restarted.
             </para>
           </listitem>
         </varlistentry>
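
A few hedged examples of the rabbitmqctl additions documented above (queue and node names are placeholders):

    rabbitmqctl -t 30 list_queues name messages_ram message_bytes_persistent state
    rabbitmqctl purge_queue myqueue
    rabbitmqctl force_boot
    rabbitmqctl rename_cluster_node rabbit@oldhost rabbit@newhost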
index aff1472676dc50a6bf8390ca0044267e9878dfc4..df58ba3988ac6745ebca9bf057ae93d1737f4e72 100644 (file)
@@ -1,7 +1,7 @@
 {application, rabbit,           %% -*- erlang -*-
  [{description, "RabbitMQ"},
   {id, "RabbitMQ"},
-  {vsn, "3.3.5"},
+  {vsn, "3.5.4"},
   {modules, []},
   {registered, [rabbit_amqqueue_sup,
                 rabbit_log,
@@ -29,6 +29,7 @@
          {heartbeat, 580},
          {msg_store_file_size_limit, 16777216},
          {queue_index_max_journal_entries, 65536},
+         {queue_index_embed_msgs_below, 4096},
          {default_user, <<"guest">>},
          {default_pass, <<"guest">>},
          {default_user_tags, [administrator]},
          {server_properties, []},
          {collect_statistics, none},
          {collect_statistics_interval, 5000},
+         {mnesia_table_loading_timeout, 30000},
          {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
          {auth_backends, [rabbit_auth_backend_internal]},
          {delegate_count, 16},
          {trace_vhosts, []},
          {log_levels, [{connection, info}]},
          {ssl_cert_login_from, distinguished_name},
+         {ssl_handshake_timeout, 5000},
+         {ssl_allow_poodle_attack, false},
+         {handshake_timeout, 10000},
          {reverse_dns_lookups, false},
          {cluster_partition_handling, ignore},
+         {cluster_keepalive_interval, 10000},
          {tcp_listen_options, [binary,
                                {packet,        raw},
                                {reuseaddr,     true},
@@ -73,5 +79,7 @@
            mnesia_lib, rpc, mnesia_tm, qlc, sofs, proplists, credit_flow,
            pmon, ssl_connection, tls_connection, ssl_record, tls_record,
            gen_fsm, ssl]},
-         {ssl_apps, [asn1, crypto, public_key, ssl]}
+         {ssl_apps, [asn1, crypto, public_key, ssl]},
+         %% see rabbitmq-server#114
+         {mirroring_flow_control, true}
         ]}]}.
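The hunk above introduces several new rabbit application defaults (queue index embedding, Mnesia table loading timeout, handshake timeouts, cluster keepalive). A minimal, illustrative rabbitmq.config sketch overriding a few of them per node, with values simply copied from the defaults shown above:

```erlang
%% /etc/rabbitmq/rabbitmq.config -- illustrative values only; the keys
%% mirror the new application defaults added in the hunk above.
[
 {rabbit, [
   %% messages smaller than this are embedded in the queue index
   {queue_index_embed_msgs_below, 4096},
   %% how long to wait for Mnesia tables when starting a clustered node
   {mnesia_table_loading_timeout, 30000},
   %% abort TLS handshakes that stall longer than this (milliseconds)
   {ssl_handshake_timeout, 5000},
   %% interval between node-to-node keepalive messages (milliseconds)
   {cluster_keepalive_interval, 10000}
 ]}
].
```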
index 245c23bc4f2edc86b793a15eb7e96fc760c4f2aa..5a98e7059beed27f84b75e033f985f7ce27265f7 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -ifdef(use_specs).
@@ -20,9 +20,9 @@
 -type(args() :: any()).
 -type(members() :: [pid()]).
 
--spec(joined/2          :: (args(), members())    -> callback_result()).
--spec(members_changed/3 :: (args(), members(), members()) -> callback_result()).
--spec(handle_msg/3      :: (args(), pid(), any()) -> callback_result()).
--spec(terminate/2       :: (args(), term())       -> any()).
+-spec(joined/2           :: (args(), members())    -> callback_result()).
+-spec(members_changed/3  :: (args(), members(),members()) -> callback_result()).
+-spec(handle_msg/3       :: (args(), pid(), any()) -> callback_result()).
+-spec(handle_terminate/2 :: (args(), term())       -> any()).
 
 -endif.
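The spec change above renames terminate/2 to handle_terminate/2 in the gm callback contract. A minimal callback module conforming to the updated specs might look like the following sketch (the module name and the trivial bodies are illustrative only):

```erlang
-module(my_gm_callback).   %% hypothetical example module

-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).

%% Each callback takes the opaque args() first, per the specs above.
joined(_Args, _Members)                  -> ok.
members_changed(_Args, _Births, _Deaths) -> ok.
handle_msg(_Args, _From, _Msg)           -> ok.
handle_terminate(_Args, _Reason)         -> ok.
```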
index 5ac3197ed96c5b0f01b39e431997295c4c935bba..ddcfd6a6480357336d1e1f790aed885e310ee876 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
+%% Passed around most places
 -record(user, {username,
                tags,
-               auth_backend, %% Module this user came from
-               impl          %% Scratch space for that module
-              }).
+               authz_backends}). %% List of {Module, AuthUserImpl} pairs
 
+%% Passed to auth backends
+-record(auth_user, {username,
+                    tags,
+                    impl}).
+
+%% Implementation for the internal auth backend
 -record(internal_user, {username, password_hash, tags}).
 -record(permission, {configure, write, read}).
 -record(user_vhost, {username, virtual_host}).
 
 -record(resource, {virtual_host, kind, name}).
 
--record(exchange, {name, type, durable, auto_delete, internal, arguments,
-                   scratches, policy, decorators}).
--record(exchange_serial, {name, next}).
+%% fields described as 'transient' here are cleared when writing to
+%% rabbit_durable_<thing>
+-record(exchange, {
+          name, type, durable, auto_delete, internal, arguments, %% immutable
+          scratches,    %% durable, explicitly updated via update_scratch/3
+          policy,       %% durable, implicitly updated when policy changes
+          decorators}). %% transient, recalculated in store/1 (i.e. recovery)
+
+-record(amqqueue, {
+          name, durable, auto_delete, exclusive_owner = none, %% immutable
+          arguments,                   %% immutable
+          pid,                         %% durable (just so we know home node)
+          slave_pids, sync_slave_pids, %% transient
+          recoverable_slaves,          %% durable
+          policy,                      %% durable, implicit update as above
+          gm_pids,                     %% transient
+          decorators,                  %% transient, recalculated as above
+          state}).                     %% durable (have we crashed?)
 
--record(amqqueue, {name, durable, auto_delete, exclusive_owner = none,
-                   arguments, pid, slave_pids, sync_slave_pids, policy,
-                   gm_pids, decorators}).
+-record(exchange_serial, {name, next}).
 
 %% mnesia doesn't like unary records, so we add a dummy 'value' field
 -record(route, {binding, value = const}).
                         is_persistent}).
 
 -record(ssl_socket, {tcp, ssl}).
--record(delivery, {mandatory, confirm, sender, message, msg_seq_no}).
+-record(delivery, {mandatory, confirm, sender, message, msg_seq_no, flow}).
 -record(amqp_error, {name, explanation = "", method = none}).
 
 -record(event, {type, props, reference = undefined, timestamp}).
 
--record(message_properties, {expiry, needs_confirming = false}).
+-record(message_properties, {expiry, needs_confirming = false, size}).
 
 -record(plugin, {name,          %% atom()
                  version,       %% string()
 
 %%----------------------------------------------------------------------------
 
--define(COPYRIGHT_MESSAGE, "Copyright (C) 2007-2014 GoPivotal, Inc.").
+-define(COPYRIGHT_MESSAGE, "Copyright (C) 2007-2015 Pivotal Software, Inc.").
 -define(INFORMATION_MESSAGE, "Licensed under the MPL.  See http://www.rabbitmq.com/").
 -define(ERTS_MINIMUM, "5.6.3").
 
 -define(DESIRED_HIBERNATE,         10000).
 -define(CREDIT_DISC_BOUND,   {2000, 500}).
 
-%% This is dictated by `erlang:send_after' on which we depend to implement TTL.
--define(MAX_EXPIRY_TIMER, 4294967295).
-
 -define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>).
 -define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]).
 -define(DELETED_HEADER, <<"BCC">>).
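The record changes above split authentication state: #user{} now carries authz_backends as a list of {Module, AuthUserImpl} pairs, while the per-backend view handed to auth backends moves into the new #auth_user{} record. A small sketch of the resulting shapes (the username, tags and backend module below are placeholders):

```erlang
-module(record_shapes_example).   %% hypothetical module, for illustration
-export([example/0]).

-include("rabbit.hrl").

example() ->
    %% per-backend view, as passed to auth backends
    AuthUser = #auth_user{username = <<"guest">>,
                          tags     = [administrator],
                          impl     = none},
    %% general-purpose user record; authz_backends is a list of
    %% {Module, AuthUserImpl} pairs, per the record comment above
    User = #user{username       = <<"guest">>,
                 tags           = [administrator],
                 authz_backends = [{rabbit_auth_backend_internal,
                                    AuthUser#auth_user.impl}]},
    {User, AuthUser}.
```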
diff --git a/rabbitmq-server/include/rabbit_cli.hrl b/rabbitmq-server/include/rabbit_cli.hrl
new file mode 100644 (file)
index 0000000..1bffc9a
--- /dev/null
@@ -0,0 +1,50 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-define(NODE_OPT, "-n").
+-define(QUIET_OPT, "-q").
+-define(VHOST_OPT, "-p").
+-define(TIMEOUT_OPT, "-t").
+
+-define(VERBOSE_OPT, "-v").
+-define(MINIMAL_OPT, "-m").
+-define(ENABLED_OPT, "-E").
+-define(ENABLED_ALL_OPT, "-e").
+
+-define(PRIORITY_OPT, "--priority").
+-define(APPLY_TO_OPT, "--apply-to").
+-define(RAM_OPT, "--ram").
+-define(OFFLINE_OPT, "--offline").
+-define(ONLINE_OPT, "--online").
+
+
+-define(NODE_DEF(Node), {?NODE_OPT, {option, Node}}).
+-define(QUIET_DEF, {?QUIET_OPT, flag}).
+-define(VHOST_DEF, {?VHOST_OPT, {option, "/"}}).
+-define(TIMEOUT_DEF, {?TIMEOUT_OPT, {option, "infinity"}}).
+
+-define(VERBOSE_DEF, {?VERBOSE_OPT, flag}).
+-define(MINIMAL_DEF, {?MINIMAL_OPT, flag}).
+-define(ENABLED_DEF, {?ENABLED_OPT, flag}).
+-define(ENABLED_ALL_DEF, {?ENABLED_ALL_OPT, flag}).
+
+-define(PRIORITY_DEF, {?PRIORITY_OPT, {option, "0"}}).
+-define(APPLY_TO_DEF, {?APPLY_TO_OPT, {option, "all"}}).
+-define(RAM_DEF, {?RAM_OPT, flag}).
+-define(OFFLINE_DEF, {?OFFLINE_OPT, flag}).
+-define(ONLINE_DEF, {?ONLINE_OPT, flag}).
+
+-define(RPC_TIMEOUT, infinity).
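The new rabbit_cli.hrl centralises the CLI switch names and their getopt-style definitions. A hypothetical sketch of how a command might assemble its switches from these macros (the function name and the surrounding command-spec shape are assumptions, not taken from the real rabbitmqctl tables):

```erlang
-module(cli_switches_example).   %% hypothetical, for illustration only
-export([list_queues_switches/0]).

-include("rabbit_cli.hrl").

%% Expands to [{"-p", {option, "/"}}, {"-t", {option, "infinity"}}]:
%% an optional vhost defaulting to "/" and a timeout defaulting to
%% "infinity", matching the defines above.
list_queues_switches() ->
    [?VHOST_DEF, ?TIMEOUT_DEF].
```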
index 4e726b07abc4c216bcd3cdffd4c3befcdd294dc2..803ed6b7ce5e305cbe82699464866fd96d03d5cd 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -include("rabbit.hrl").
index 9b9f5e8511f685165eb49f29f97d3bce4ce33675..4ab8c86636f73024b55e991136c4989224ce4351 100644 (file)
@@ -35,15 +35,32 @@ REPOS:= \
     toke \
     webmachine-wrapper
 
-BRANCH:=default
+BRANCH:=master
+
+UMBRELLA_REPO_FETCH:=$(shell git remote -v 2>/dev/null | awk '/^origin\t.+ \(fetch\)$$/ { print $$2; }')
+ifdef UMBRELLA_REPO_FETCH
+GIT_CORE_REPOBASE_FETCH:=$(shell dirname $(UMBRELLA_REPO_FETCH))
+GIT_CORE_SUFFIX_FETCH:=$(suffix $(UMBRELLA_REPO_FETCH))
+else
+GIT_CORE_REPOBASE_FETCH:=https://github.com/rabbitmq
+GIT_CORE_SUFFIX_FETCH:=.git
+endif
 
-HG_CORE_REPOBASE:=$(shell dirname `hg paths default 2>/dev/null` 2>/dev/null)
-ifndef HG_CORE_REPOBASE
-HG_CORE_REPOBASE:=http://hg.rabbitmq.com/
+UMBRELLA_REPO_PUSH:=$(shell git remote -v 2>/dev/null | awk '/^origin\t.+ \(push\)$$/ { print $$2; }')
+ifdef UMBRELLA_REPO_PUSH
+GIT_CORE_REPOBASE_PUSH:=$(shell dirname $(UMBRELLA_REPO_PUSH))
+GIT_CORE_SUFFIX_PUSH:=$(suffix $(UMBRELLA_REPO_PUSH))
+else
+GIT_CORE_REPOBASE_PUSH:=git@github.com:rabbitmq
+GIT_CORE_SUFFIX_PUSH:=.git
 endif
 
 VERSION:=0.0.0
 
+ifndef VERBOSE
+QUIET:=@
+endif
+
 #----------------------------------
 
 all:
@@ -70,18 +87,18 @@ plugins-srcdist:
        rm -rf $(PLUGINS_SRC_DIST_DIR)
        mkdir -p $(PLUGINS_SRC_DIST_DIR)/licensing
 
-       rsync -a --exclude '.hg*' rabbitmq-erlang-client $(PLUGINS_SRC_DIST_DIR)/
+       rsync -a --exclude '.git*' rabbitmq-erlang-client $(PLUGINS_SRC_DIST_DIR)/
        touch $(PLUGINS_SRC_DIST_DIR)/rabbitmq-erlang-client/.srcdist_done
 
-       rsync -a --exclude '.hg*' rabbitmq-server $(PLUGINS_SRC_DIST_DIR)/
+       rsync -a --exclude '.git*' rabbitmq-server $(PLUGINS_SRC_DIST_DIR)/
        touch $(PLUGINS_SRC_DIST_DIR)/rabbitmq-server/.srcdist_done
 
        $(MAKE) -f all-packages.mk copy-srcdist VERSION=$(VERSION) PLUGINS_SRC_DIST_DIR=$(PLUGINS_SRC_DIST_DIR)
        cp Makefile *.mk generate* $(PLUGINS_SRC_DIST_DIR)/
        echo "This is the released version of rabbitmq-public-umbrella. \
-You can clone the full version with: hg clone http://hg.rabbitmq.com/rabbitmq-public-umbrella" > $(PLUGINS_SRC_DIST_DIR)/README
+You can clone the full version with: git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git" > $(PLUGINS_SRC_DIST_DIR)/README
 
-       PRESERVE_CLONE_DIR=1 make -C $(PLUGINS_SRC_DIST_DIR) clean
+       PRESERVE_CLONE_DIR=1 $(MAKE) -C $(PLUGINS_SRC_DIST_DIR) clean
        rm -rf $(PLUGINS_SRC_DIST_DIR)/rabbitmq-server
 
 #----------------------------------
@@ -105,11 +122,56 @@ up_c: named_update
 #----------------------------------
 
 $(REPOS):
-       hg clone $(HG_CORE_REPOBASE)/$@
+       $(QUIET)retries=5; \
+       umbrella_branch="$$(git branch | awk '/^\* / { print $$2; }')"; \
+       if test "$$umbrella_branch" = "stable"; then \
+         branch_arg="-b $$umbrella_branch"; \
+       fi; \
+       while ! git clone $$branch_arg $(GIT_CORE_REPOBASE_FETCH)/$@$(GIT_CORE_SUFFIX_FETCH); do \
+         retries=$$((retries - 1)); \
+         if test "$$retries" = 0; then break; fi; \
+         sleep 1; \
+       done
+       $(QUIET)test -d $@
+       $(QUIET)global_user_name="$$(git config --global user.name)"; \
+       global_user_email="$$(git config --global user.email)"; \
+       user_name="$$(git config user.name)"; \
+       user_email="$$(git config user.email)"; \
+       cd $@ && \
+       git remote set-url --push origin $(GIT_CORE_REPOBASE_PUSH)/$@$(GIT_CORE_SUFFIX_PUSH) && \
+       if test "$$global_user_name" != "$$user_name"; then git config user.name "$$user_name"; fi && \
+       if test "$$global_user_email" != "$$user_email"; then git config user.email "$$user_email"; fi
+
 
 .PHONY: checkout
 checkout: $(REPOS)
 
+.PHONY: list-repos
+list-repos:
+       @for repo in $(REPOS); do echo $$repo; done
+
+.PHONY: sync-gituser
+sync-gituser:
+       @global_user_name="$$(git config --global user.name)"; \
+       global_user_email="$$(git config --global user.email)"; \
+       user_name="$$(git config user.name)"; \
+       user_email="$$(git config user.email)"; \
+       for repo in $(REPOS); do \
+       cd $$repo && \
+       git config --unset user.name && \
+       git config --unset user.email && \
+       if test "$$global_user_name" != "$$user_name"; then git config user.name "$$user_name"; fi && \
+       if test "$$global_user_email" != "$$user_email"; then git config user.email "$$user_email"; fi && \
+       cd ..; done
+
+.PHONY: sync-gitremote
+sync-gitremote:
+       @for repo in $(REPOS); do \
+       cd $$repo && \
+       git remote set-url origin $(GIT_CORE_REPOBASE_FETCH)/$$repo$(GIT_CORE_SUFFIX_FETCH) && \
+       git remote set-url --push origin $(GIT_CORE_REPOBASE_PUSH)/$$repo$(GIT_CORE_SUFFIX_PUSH) && \
+       cd ..; done
+
 #----------------------------------
 # Subrepository management
 
@@ -137,38 +199,42 @@ endef
 # Do not allow status to fork with -j otherwise output will be garbled
 .PHONY: status
 status: checkout
-       $(foreach DIR,. $(REPOS), \
-               (cd $(DIR); OUT=$$(hg st -mad); \
-               if \[ ! -z "$$OUT" \]; then echo "\n$(DIR):\n$$OUT"; fi) &&) true
+       @for repo in . $(REPOS); do \
+               echo "$$repo:"; \
+               cd "$$repo" && git status -s && cd - >/dev/null; \
+       done
 
 .PHONY: pull
 pull: $(foreach DIR,. $(REPOS),$(DIR)+pull)
 
-$(eval $(call repo_targets,. $(REPOS),pull,| %,(cd % && hg pull)))
+$(eval $(call repo_targets,. $(REPOS),pull,| %,\
+       (cd % && git fetch -p && \
+        (! git symbolic-ref -q HEAD || git pull --ff-only))))
 
 .PHONY: update
-update: $(foreach DIR,. $(REPOS),$(DIR)+update)
-
-$(eval $(call repo_targets,. $(REPOS),update,%+pull,(cd % && hg up)))
+update: pull
 
 .PHONY: named_update
 named_update: $(foreach DIR,. $(REPOS),$(DIR)+named_update)
 
-$(eval $(call repo_targets,. $(REPOS),named_update,%+pull,\
-       (cd % && hg up -C $(BRANCH))))
+$(eval $(call repo_targets,. $(REPOS),named_update,| %,\
+       (cd % && git fetch -p && git checkout $(BRANCH) && \
+        (! git symbolic-ref -q HEAD || git pull --ff-only))))
 
 .PHONY: tag
 tag: $(foreach DIR,. $(REPOS),$(DIR)+tag)
 
-$(eval $(call repo_targets,. $(REPOS),tag,| %,(cd % && hg tag $(TAG))))
+$(eval $(call repo_targets,. $(REPOS),tag,| %,\
+       (cd % && git tag $(TAG))))
 
 .PHONY: push
 push: $(foreach DIR,. $(REPOS),$(DIR)+push)
 
-# "|| true" sicne hg push fails if there are no changes
-$(eval $(call repo_targets,. $(REPOS),push,| %,(cd % && hg push -f || true)))
+$(eval $(call repo_targets,. $(REPOS),push,| %,\
+       (cd % && git push && git push --tags)))
 
 .PHONY: checkin
 checkin: $(foreach DIR,. $(REPOS),$(DIR)+checkin)
 
-$(eval $(call repo_targets,. $(REPOS),checkin,| %,(cd % && hg ci)))
+$(eval $(call repo_targets,. $(REPOS),checkin,| %,\
+       (cd % && (test -z "$$$$(git status -s -uno)" || git commit -a))))
index ae655c6bb56fbe7c5b6cbfc76842dc2654284dc6..58177d4786f1ecea97b211234b5865621fc81b82 100644 (file)
@@ -1 +1 @@
-This is the released version of rabbitmq-public-umbrella. You can clone the full version with: hg clone http://hg.rabbitmq.com/rabbitmq-public-umbrella
+This is the released version of rabbitmq-public-umbrella. You can clone the full version with: git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git
diff --git a/rabbitmq-server/plugins-src/cowboy-wrapper/CONTRIBUTING.md b/rabbitmq-server/plugins-src/cowboy-wrapper/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/rabbitmq-server/plugins-src/cowboy-wrapper/cowboy-git/src/cowboy_http_req.erl.orig b/rabbitmq-server/plugins-src/cowboy-wrapper/cowboy-git/src/cowboy_http_req.erl.orig
deleted file mode 100644 (file)
index bf4ac7a..0000000
+++ /dev/null
@@ -1,815 +0,0 @@
-%% Copyright (c) 2011, Loïc Hoguin <essen@dev-extend.eu>
-%% Copyright (c) 2011, Anthony Ramine <nox@dev-extend.eu>
-%%
-%% Permission to use, copy, modify, and/or distribute this software for any
-%% purpose with or without fee is hereby granted, provided that the above
-%% copyright notice and this permission notice appear in all copies.
-%%
-%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-%% @doc HTTP request manipulation API.
-%%
-%% Almost all functions in this module return a new <em>Req</em> variable.
-%% It should always be used instead of the one used in your function call
-%% because it keeps the state of the request. It also allows Cowboy to do
-%% some lazy evaluation and cache results where possible.
--module(cowboy_http_req).
-
--export([
-       method/1, version/1, peer/1, peer_addr/1,
-       host/1, host_info/1, raw_host/1, port/1,
-       path/1, path_info/1, raw_path/1,
-       qs_val/2, qs_val/3, qs_vals/1, raw_qs/1,
-       binding/2, binding/3, bindings/1,
-       header/2, header/3, headers/1,
-       parse_header/2, parse_header/3,
-       cookie/2, cookie/3, cookies/1,
-       meta/2, meta/3
-]). %% Request API.
-
--export([
-       body/1, body/2, body_qs/1,
-       multipart_data/1, multipart_skip/1
-]). %% Request Body API.
-
--export([
-       set_resp_cookie/4, set_resp_header/3, set_resp_body/2,
-       set_resp_body_fun/3, has_resp_header/2, has_resp_body/1,
-       reply/2, reply/3, reply/4,
-       chunked_reply/2, chunked_reply/3, chunk/2,
-       upgrade_reply/3
-]). %% Response API.
-
--export([
-       compact/1, transport/1
-]). %% Misc API.
-
--include("include/http.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-%% Request API.
-
-%% @doc Return the HTTP method of the request.
--spec method(#http_req{}) -> {cowboy_http:method(), #http_req{}}.
-method(Req) ->
-       {Req#http_req.method, Req}.
-
-%% @doc Return the HTTP version used for the request.
--spec version(#http_req{}) -> {cowboy_http:version(), #http_req{}}.
-version(Req) ->
-       {Req#http_req.version, Req}.
-
-%% @doc Return the peer address and port number of the remote host.
--spec peer(#http_req{}) -> {{inet:ip_address(), inet:ip_port()}, #http_req{}}.
-peer(Req=#http_req{socket=Socket, transport=Transport, peer=undefined}) ->
-       {ok, Peer} = Transport:peername(Socket),
-       {Peer, Req#http_req{peer=Peer}};
-peer(Req) ->
-       {Req#http_req.peer, Req}.
-
-%% @doc Returns the peer address calculated from headers.
--spec peer_addr(#http_req{}) -> {inet:ip_address(), #http_req{}}.
-peer_addr(Req = #http_req{}) ->
-       {RealIp, Req1} = header(<<"X-Real-Ip">>, Req),
-       {ForwardedForRaw, Req2} = header(<<"X-Forwarded-For">>, Req1),
-       {{PeerIp, _PeerPort}, Req3} = peer(Req2),
-       ForwardedFor = case ForwardedForRaw of
-               undefined ->
-                       undefined;
-               ForwardedForRaw ->
-                       case re:run(ForwardedForRaw, "^(?<first_ip>[^\\,]+)",
-                                       [{capture, [first_ip], binary}]) of
-                               {match, [FirstIp]} -> FirstIp;
-                               _Any -> undefined
-                       end
-       end,
-       {ok, PeerAddr} = if
-               is_binary(RealIp) -> inet_parse:address(binary_to_list(RealIp));
-               is_binary(ForwardedFor) -> inet_parse:address(binary_to_list(ForwardedFor));
-               true -> {ok, PeerIp}
-       end,
-       {PeerAddr, Req3}.
-
-%% @doc Return the tokens for the hostname requested.
--spec host(#http_req{}) -> {cowboy_dispatcher:tokens(), #http_req{}}.
-host(Req) ->
-       {Req#http_req.host, Req}.
-
-%% @doc Return the extra host information obtained from partially matching
-%% the hostname using <em>'...'</em>.
--spec host_info(#http_req{})
-       -> {cowboy_dispatcher:tokens() | undefined, #http_req{}}.
-host_info(Req) ->
-       {Req#http_req.host_info, Req}.
-
-%% @doc Return the raw host directly taken from the request.
--spec raw_host(#http_req{}) -> {binary(), #http_req{}}.
-raw_host(Req) ->
-       {Req#http_req.raw_host, Req}.
-
-%% @doc Return the port used for this request.
--spec port(#http_req{}) -> {inet:ip_port(), #http_req{}}.
-port(Req) ->
-       {Req#http_req.port, Req}.
-
-%% @doc Return the path segments for the path requested.
-%%
-%% Following RFC2396, this function may return path segments containing any
-%% character, including <em>/</em> if, and only if, a <em>/</em> was escaped
-%% and part of a path segment in the path requested.
--spec path(#http_req{}) -> {cowboy_dispatcher:tokens(), #http_req{}}.
-path(Req) ->
-       {Req#http_req.path, Req}.
-
-%% @doc Return the extra path information obtained from partially matching
-%% the patch using <em>'...'</em>.
--spec path_info(#http_req{})
-       -> {cowboy_dispatcher:tokens() | undefined, #http_req{}}.
-path_info(Req) ->
-       {Req#http_req.path_info, Req}.
-
-%% @doc Return the raw path directly taken from the request.
--spec raw_path(#http_req{}) -> {binary(), #http_req{}}.
-raw_path(Req) ->
-       {Req#http_req.raw_path, Req}.
-
-%% @equiv qs_val(Name, Req, undefined)
--spec qs_val(binary(), #http_req{})
-       -> {binary() | true | undefined, #http_req{}}.
-qs_val(Name, Req) when is_binary(Name) ->
-       qs_val(Name, Req, undefined).
-
-%% @doc Return the query string value for the given key, or a default if
-%% missing.
-qs_val(Name, Req=#http_req{raw_qs=RawQs, qs_vals=undefined,
-               urldecode={URLDecFun, URLDecArg}}, Default) when is_binary(Name) ->
-       QsVals = parse_qs(RawQs, fun(Bin) -> URLDecFun(Bin, URLDecArg) end),
-       qs_val(Name, Req#http_req{qs_vals=QsVals}, Default);
-qs_val(Name, Req, Default) ->
-       case lists:keyfind(Name, 1, Req#http_req.qs_vals) of
-               {Name, Value} -> {Value, Req};
-               false -> {Default, Req}
-       end.
-
-%% @doc Return the full list of query string values.
--spec qs_vals(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
-qs_vals(Req=#http_req{raw_qs=RawQs, qs_vals=undefined,
-               urldecode={URLDecFun, URLDecArg}}) ->
-       QsVals = parse_qs(RawQs, fun(Bin) -> URLDecFun(Bin, URLDecArg) end),
-       qs_vals(Req#http_req{qs_vals=QsVals});
-qs_vals(Req=#http_req{qs_vals=QsVals}) ->
-       {QsVals, Req}.
-
-%% @doc Return the raw query string directly taken from the request.
--spec raw_qs(#http_req{}) -> {binary(), #http_req{}}.
-raw_qs(Req) ->
-       {Req#http_req.raw_qs, Req}.
-
-%% @equiv binding(Name, Req, undefined)
--spec binding(atom(), #http_req{}) -> {binary() | undefined, #http_req{}}.
-binding(Name, Req) when is_atom(Name) ->
-       binding(Name, Req, undefined).
-
-%% @doc Return the binding value for the given key obtained when matching
-%% the host and path against the dispatch list, or a default if missing.
-binding(Name, Req, Default) when is_atom(Name) ->
-       case lists:keyfind(Name, 1, Req#http_req.bindings) of
-               {Name, Value} -> {Value, Req};
-               false -> {Default, Req}
-       end.
-
-%% @doc Return the full list of binding values.
--spec bindings(#http_req{}) -> {list({atom(), binary()}), #http_req{}}.
-bindings(Req) ->
-       {Req#http_req.bindings, Req}.
-
-%% @equiv header(Name, Req, undefined)
--spec header(atom() | binary(), #http_req{})
-       -> {binary() | undefined, #http_req{}}.
-header(Name, Req) when is_atom(Name) orelse is_binary(Name) ->
-       header(Name, Req, undefined).
-
-%% @doc Return the header value for the given key, or a default if missing.
-header(Name, Req, Default) when is_atom(Name) orelse is_binary(Name) ->
-       case lists:keyfind(Name, 1, Req#http_req.headers) of
-               {Name, Value} -> {Value, Req};
-               false -> {Default, Req}
-       end.
-
-%% @doc Return the full list of headers.
--spec headers(#http_req{}) -> {cowboy_http:headers(), #http_req{}}.
-headers(Req) ->
-       {Req#http_req.headers, Req}.
-
-%% @doc Semantically parse headers.
-%%
-%% When the value isn't found, a proper default value for the type
-%% returned is used as a return value.
-%% @see parse_header/3
--spec parse_header(cowboy_http:header(), #http_req{})
-       -> {any(), #http_req{}} | {error, badarg}.
-parse_header(Name, Req=#http_req{p_headers=PHeaders}) ->
-       case lists:keyfind(Name, 1, PHeaders) of
-               false -> parse_header(Name, Req, parse_header_default(Name));
-               {Name, Value} -> {Value, Req}
-       end.
-
-%% @doc Default values for semantic header parsing.
--spec parse_header_default(cowboy_http:header()) -> any().
-parse_header_default('Connection') -> [];
-parse_header_default(_Name) -> undefined.
-
-%% @doc Semantically parse headers.
-%%
-%% When the header is unknown, the value is returned directly without parsing.
--spec parse_header(cowboy_http:header(), #http_req{}, any())
-       -> {any(), #http_req{}} | {error, badarg}.
-parse_header(Name, Req, Default) when Name =:= 'Accept' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:list(Value, fun cowboy_http:media_range/2)
-               end);
-parse_header(Name, Req, Default) when Name =:= 'Accept-Charset' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:nonempty_list(Value, fun cowboy_http:conneg/2)
-               end);
-parse_header(Name, Req, Default) when Name =:= 'Accept-Encoding' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:list(Value, fun cowboy_http:conneg/2)
-               end);
-parse_header(Name, Req, Default) when Name =:= 'Accept-Language' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:nonempty_list(Value, fun cowboy_http:language_range/2)
-               end);
-parse_header(Name, Req, Default) when Name =:= 'Connection' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:nonempty_list(Value, fun cowboy_http:token_ci/2)
-               end);
-parse_header(Name, Req, Default) when Name =:= 'Content-Length' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:digits(Value)
-               end);
-parse_header(Name, Req, Default) when Name =:= 'Content-Type' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:content_type(Value)
-               end);
-parse_header(Name, Req, Default)
-               when Name =:= 'If-Match'; Name =:= 'If-None-Match' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:entity_tag_match(Value)
-               end);
-parse_header(Name, Req, Default)
-               when Name =:= 'If-Modified-Since'; Name =:= 'If-Unmodified-Since' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:http_date(Value)
-               end);
-parse_header(Name, Req, Default) when Name =:= 'Upgrade' ->
-       parse_header(Name, Req, Default,
-               fun (Value) ->
-                       cowboy_http:nonempty_list(Value, fun cowboy_http:token_ci/2)
-               end);
-parse_header(Name, Req, Default) ->
-       {Value, Req2} = header(Name, Req, Default),
-       {undefined, Value, Req2}.
-
-parse_header(Name, Req=#http_req{p_headers=PHeaders}, Default, Fun) ->
-       case header(Name, Req) of
-               {undefined, Req2} ->
-                       {Default, Req2#http_req{p_headers=[{Name, Default}|PHeaders]}};
-               {Value, Req2} ->
-                       case Fun(Value) of
-                               {error, badarg} ->
-                                       {error, badarg};
-                               P ->
-                                       {P, Req2#http_req{p_headers=[{Name, P}|PHeaders]}}
-                       end
-       end.
-
-%% @equiv cookie(Name, Req, undefined)
--spec cookie(binary(), #http_req{})
-       -> {binary() | true | undefined, #http_req{}}.
-cookie(Name, Req) when is_binary(Name) ->
-       cookie(Name, Req, undefined).
-
-%% @doc Return the cookie value for the given key, or a default if
-%% missing.
-cookie(Name, Req=#http_req{cookies=undefined}, Default) when is_binary(Name) ->
-       case header('Cookie', Req) of
-               {undefined, Req2} ->
-                       {Default, Req2#http_req{cookies=[]}};
-               {RawCookie, Req2} ->
-                       Cookies = cowboy_cookies:parse_cookie(RawCookie),
-                       cookie(Name, Req2#http_req{cookies=Cookies}, Default)
-       end;
-cookie(Name, Req, Default) ->
-       case lists:keyfind(Name, 1, Req#http_req.cookies) of
-               {Name, Value} -> {Value, Req};
-               false -> {Default, Req}
-       end.
-
-%% @doc Return the full list of cookie values.
--spec cookies(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
-cookies(Req=#http_req{cookies=undefined}) ->
-       case header('Cookie', Req) of
-               {undefined, Req2} ->
-                       {[], Req2#http_req{cookies=[]}};
-               {RawCookie, Req2} ->
-                       Cookies = cowboy_cookies:parse_cookie(RawCookie),
-                       cookies(Req2#http_req{cookies=Cookies})
-       end;
-cookies(Req=#http_req{cookies=Cookies}) ->
-       {Cookies, Req}.
-
-%% @equiv meta(Name, Req, undefined)
--spec meta(atom(), #http_req{}) -> {any() | undefined, #http_req{}}.
-meta(Name, Req) ->
-       meta(Name, Req, undefined).
-
-%% @doc Return metadata information about the request.
-%%
-%% Metadata information varies from one protocol to another. Websockets
-%% would define the protocol version here, while REST would use it to
-%% indicate which media type, language and charset were retained.
--spec meta(atom(), #http_req{}, any()) -> {any(), #http_req{}}.
-meta(Name, Req, Default) ->
-       case lists:keyfind(Name, 1, Req#http_req.meta) of
-               {Name, Value} -> {Value, Req};
-               false -> {Default, Req}
-       end.
-
-%% Request Body API.
-
-%% @doc Return the full body sent with the request, or <em>{error, badarg}</em>
-%% if no <em>Content-Length</em> is available.
-%% @todo We probably want to allow a max length.
-%% @todo Add multipart support to this function.
--spec body(#http_req{}) -> {ok, binary(), #http_req{}} | {error, atom()}.
-body(Req) ->
-       {Length, Req2} = cowboy_http_req:parse_header('Content-Length', Req),
-       case Length of
-               undefined -> {error, badarg};
-               {error, badarg} -> {error, badarg};
-               _Any ->
-                       body(Length, Req2)
-       end.
-
-%% @doc Return <em>Length</em> bytes of the request body.
-%%
-%% You probably shouldn't be calling this function directly, as it expects the
-%% <em>Length</em> argument to be the full size of the body, and will consider
-%% the body to be fully read from the socket.
-%% @todo We probably want to configure the timeout.
--spec body(non_neg_integer(), #http_req{})
-       -> {ok, binary(), #http_req{}} | {error, atom()}.
-body(Length, Req=#http_req{body_state=waiting, buffer=Buffer})
-               when is_integer(Length) andalso Length =< byte_size(Buffer) ->
-       << Body:Length/binary, Rest/bits >> = Buffer,
-       {ok, Body, Req#http_req{body_state=done, buffer=Rest}};
-body(Length, Req=#http_req{socket=Socket, transport=Transport,
-               body_state=waiting, buffer=Buffer}) ->
-       case Transport:recv(Socket, Length - byte_size(Buffer), 5000) of
-               {ok, Body} -> {ok, << Buffer/binary, Body/binary >>,
-                       Req#http_req{body_state=done, buffer= <<>>}};
-               {error, Reason} -> {error, Reason}
-       end.
-
-%% @doc Return the full body sent with the reqest, parsed as an
-%% application/x-www-form-urlencoded string. Essentially a POST query string.
--spec body_qs(#http_req{}) -> {list({binary(), binary() | true}), #http_req{}}.
-body_qs(Req=#http_req{urldecode={URLDecFun, URLDecArg}}) ->
-       {ok, Body, Req2} = body(Req),
-       {parse_qs(Body, fun(Bin) -> URLDecFun(Bin, URLDecArg) end), Req2}.
-
-%% Multipart Request API.
-
-%% @doc Return data from the multipart parser.
-%%
-%% Use this function for multipart streaming. For each part in the request,
-%% this function returns <em>{headers, Headers}</em> followed by a sequence of
-%% <em>{data, Data}</em> tuples and finally <em>end_of_part</em>. When there
-%% is no part to parse anymore, <em>eof</em> is returned.
-%%
-%% If the request Content-Type is not a multipart one, <em>{error, badarg}</em>
-%% is returned.
--spec multipart_data(#http_req{})
-               -> {{headers, cowboy_http:headers()}
-                               | {data, binary()} | end_of_part | eof,
-                       #http_req{}}.
-multipart_data(Req=#http_req{body_state=waiting}) ->
-       {{<<"multipart">>, _SubType, Params}, Req2} =
-               parse_header('Content-Type', Req),
-       {_, Boundary} = lists:keyfind(<<"boundary">>, 1, Params),
-       {Length, Req3=#http_req{buffer=Buffer}} =
-               parse_header('Content-Length', Req2),
-       multipart_data(Req3, Length, cowboy_multipart:parser(Boundary), Buffer);
-multipart_data(Req=#http_req{body_state={multipart, Length, Cont}}) ->
-       multipart_data(Req, Length, Cont());
-multipart_data(Req=#http_req{body_state=done}) ->
-       {eof, Req}.
-
-multipart_data(Req, Length, Parser, Buffer) when byte_size(Buffer) >= Length ->
-       << Data:Length/binary, Rest/binary >> = Buffer,
-       multipart_data(Req#http_req{buffer=Rest}, 0, Parser(Data));
-multipart_data(Req, Length, Parser, Buffer) ->
-       NewLength = Length - byte_size(Buffer),
-       multipart_data(Req#http_req{buffer= <<>>}, NewLength, Parser(Buffer)).
-
-multipart_data(Req, Length, {headers, Headers, Cont}) ->
-       {{headers, Headers}, Req#http_req{body_state={multipart, Length, Cont}}};
-multipart_data(Req, Length, {body, Data, Cont}) ->
-       {{body, Data}, Req#http_req{body_state={multipart, Length, Cont}}};
-multipart_data(Req, Length, {end_of_part, Cont}) ->
-       {end_of_part, Req#http_req{body_state={multipart, Length, Cont}}};
-multipart_data(Req, 0, eof) ->
-       {eof, Req#http_req{body_state=done}};
-multipart_data(Req=#http_req{socket=Socket, transport=Transport},
-               Length, eof) ->
-       {ok, _Data} = Transport:recv(Socket, Length, 5000),
-       {eof, Req#http_req{body_state=done}};
-multipart_data(Req=#http_req{socket=Socket, transport=Transport},
-               Length, {more, Parser}) when Length > 0 ->
-       case Transport:recv(Socket, 0, 5000) of
-               {ok, << Data:Length/binary, Buffer/binary >>} ->
-                       multipart_data(Req#http_req{buffer=Buffer}, 0, Parser(Data));
-               {ok, Data} ->
-                       multipart_data(Req, Length - byte_size(Data), Parser(Data))
-       end.
-
-%% @doc Skip a part returned by the multipart parser.
-%%
-%% This function repeatedly calls <em>multipart_data/1</em> until
-%% <em>end_of_part</em> or <em>eof</em> is parsed.
-multipart_skip(Req) ->
-       case multipart_data(Req) of
-               {end_of_part, Req2} -> {ok, Req2};
-               {eof, Req2} -> {ok, Req2};
-               {_Other, Req2} -> multipart_skip(Req2)
-       end.
-
-%% Response API.
-
-%% @doc Add a cookie header to the response.
--spec set_resp_cookie(binary(), binary(), [cowboy_cookies:cookie_option()],
-       #http_req{}) -> {ok, #http_req{}}.
-set_resp_cookie(Name, Value, Options, Req) ->
-       {HeaderName, HeaderValue} = cowboy_cookies:cookie(Name, Value, Options),
-       set_resp_header(HeaderName, HeaderValue, Req).
-
-%% @doc Add a header to the response.
-set_resp_header(Name, Value, Req=#http_req{resp_headers=RespHeaders}) ->
-       NameBin = header_to_binary(Name),
-       {ok, Req#http_req{resp_headers=[{NameBin, Value}|RespHeaders]}}.
-
-%% @doc Add a body to the response.
-%%
-%% The body set here is ignored if the response is later sent using
-%% anything other than reply/2 or reply/3. The response body is expected
-%% to be a binary or an iolist.
-set_resp_body(Body, Req) ->
-       {ok, Req#http_req{resp_body=Body}}.
-
-
-%% @doc Add a body function to the response.
-%%
-%% The response body may also be set to a content-length - stream-function pair.
-%% If the response body is of this type normal response headers will be sent.
-%% After the response headers has been sent the body function is applied.
-%% The body function is expected to write the response body directly to the
-%% socket using the transport module.
-%%
-%% If the body function crashes while writing the response body or writes fewer
-%% bytes than declared the behaviour is undefined. The body set here is ignored
-%% if the response is later sent using anything other than `reply/2' or
-%% `reply/3'.
-%%
-%% @see cowboy_http_req:transport/1.
--spec set_resp_body_fun(non_neg_integer(), fun(() -> {sent, non_neg_integer()}),
-               #http_req{}) -> {ok, #http_req{}}.
-set_resp_body_fun(StreamLen, StreamFun, Req) ->
-       {ok, Req#http_req{resp_body={StreamLen, StreamFun}}}.
-
-
-%% @doc Return whether the given header has been set for the response.
-has_resp_header(Name, #http_req{resp_headers=RespHeaders}) ->
-       NameBin = header_to_binary(Name),
-       lists:keymember(NameBin, 1, RespHeaders).
-
-%% @doc Return whether a body has been set for the response.
-has_resp_body(#http_req{resp_body={Length, _}}) ->
-       Length > 0;
-has_resp_body(#http_req{resp_body=RespBody}) ->
-       iolist_size(RespBody) > 0.
-
-%% @equiv reply(Status, [], [], Req)
--spec reply(cowboy_http:status(), #http_req{}) -> {ok, #http_req{}}.
-reply(Status, Req=#http_req{resp_body=Body}) ->
-       reply(Status, [], Body, Req).
-
-%% @equiv reply(Status, Headers, [], Req)
--spec reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
-       -> {ok, #http_req{}}.
-reply(Status, Headers, Req=#http_req{resp_body=Body}) ->
-       reply(Status, Headers, Body, Req).
-
-%% @doc Send a reply to the client.
-reply(Status, Headers, Body, Req=#http_req{socket=Socket,
-               transport=Transport, connection=Connection, pid=ReqPid,
-               method=Method, resp_state=waiting, resp_headers=RespHeaders}) ->
-       RespConn = response_connection(Headers, Connection),
-       ContentLen = case Body of {CL, _} -> CL; _ -> iolist_size(Body) end,
-       Head = response_head(Status, Headers, RespHeaders, [
-               {<<"Connection">>, atom_to_connection(Connection)},
-               {<<"Content-Length">>, integer_to_list(ContentLen)},
-               {<<"Date">>, cowboy_clock:rfc1123()},
-               {<<"Server">>, <<"Cowboy">>}
-       ]),
-       case {Method, Body} of
-               {'HEAD', _} -> Transport:send(Socket, Head);
-               {_, {_, StreamFun}} -> Transport:send(Socket, Head), StreamFun();
-               {_, _} -> Transport:send(Socket, [Head, Body])
-       end,
-       ReqPid ! {?MODULE, resp_sent},
-       {ok, Req#http_req{connection=RespConn, resp_state=done,
-               resp_headers=[], resp_body= <<>>}}.
-
-%% @equiv chunked_reply(Status, [], Req)
--spec chunked_reply(cowboy_http:status(), #http_req{}) -> {ok, #http_req{}}.
-chunked_reply(Status, Req) ->
-       chunked_reply(Status, [], Req).
-
-%% @doc Initiate the sending of a chunked reply to the client.
-%% @see cowboy_http_req:chunk/2
--spec chunked_reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
-       -> {ok, #http_req{}}.
-chunked_reply(Status, Headers, Req=#http_req{socket=Socket,
-               transport=Transport, connection=Connection, pid=ReqPid,
-               resp_state=waiting, resp_headers=RespHeaders}) ->
-       RespConn = response_connection(Headers, Connection),
-       Head = response_head(Status, Headers, RespHeaders, [
-               {<<"Connection">>, atom_to_connection(Connection)},
-               {<<"Transfer-Encoding">>, <<"chunked">>},
-               {<<"Date">>, cowboy_clock:rfc1123()},
-               {<<"Server">>, <<"Cowboy">>}
-       ]),
-       Transport:send(Socket, Head),
-       ReqPid ! {?MODULE, resp_sent},
-       {ok, Req#http_req{connection=RespConn, resp_state=chunks,
-               resp_headers=[], resp_body= <<>>}}.
-
-%% @doc Send a chunk of data.
-%%
-%% A chunked reply must have been initiated before calling this function.
-chunk(_Data, #http_req{socket=_Socket, transport=_Transport, method='HEAD'}) ->
-       ok;
-chunk(Data, #http_req{socket=Socket, transport=Transport, resp_state=chunks}) ->
-       Transport:send(Socket, [erlang:integer_to_list(iolist_size(Data), 16),
-               <<"\r\n">>, Data, <<"\r\n">>]).
-
-%% @doc Send an upgrade reply.
-%% @private
--spec upgrade_reply(cowboy_http:status(), cowboy_http:headers(), #http_req{})
-       -> {ok, #http_req{}}.
-upgrade_reply(Status, Headers, Req=#http_req{socket=Socket, transport=Transport,
-               pid=ReqPid, resp_state=waiting, resp_headers=RespHeaders}) ->
-       Head = response_head(Status, Headers, RespHeaders, [
-               {<<"Connection">>, <<"Upgrade">>}
-       ]),
-       Transport:send(Socket, Head),
-       ReqPid ! {?MODULE, resp_sent},
-       {ok, Req#http_req{resp_state=done, resp_headers=[], resp_body= <<>>}}.
-
-%% Misc API.
-
-%% @doc Compact the request data by removing all non-system information.
-%%
-%% This essentially removes the host, path, query string, bindings and headers.
-%% Use it when you really need to save up memory, for example when having
-%% many concurrent long-running connections.
--spec compact(#http_req{}) -> #http_req{}.
-compact(Req) ->
-       Req#http_req{host=undefined, host_info=undefined, path=undefined,
-               path_info=undefined, qs_vals=undefined,
-               bindings=undefined, headers=[],
-               p_headers=[], cookies=[]}.
-
-%% @doc Return the transport module and socket associated with a request.
-%%
-%% This exposes the same socket interface used internally by the HTTP protocol
-%% implementation to developers that needs low level access to the socket.
-%%
-%% It is preferred to use this in conjuction with the stream function support
-%% in `set_resp_body_fun/3' if this is used to write a response body directly
-%% to the socket. This ensures that the response headers are set correctly.
--spec transport(#http_req{}) -> {ok, module(), inet:socket()}.
-transport(#http_req{transport=Transport, socket=Socket}) ->
-       {ok, Transport, Socket}.
-
-%% Internal.
-
--spec parse_qs(binary(), fun((binary()) -> binary())) ->
-               list({binary(), binary() | true}).
-parse_qs(<<>>, _URLDecode) ->
-       [];
-parse_qs(Qs, URLDecode) ->
-       Tokens = binary:split(Qs, <<"&">>, [global, trim]),
-       [case binary:split(Token, <<"=">>) of
-               [Token] -> {URLDecode(Token), true};
-               [Name, Value] -> {URLDecode(Name), URLDecode(Value)}
-       end || Token <- Tokens].
-
--spec response_connection(cowboy_http:headers(), keepalive | close)
-       -> keepalive | close.
-response_connection([], Connection) ->
-       Connection;
-response_connection([{Name, Value}|Tail], Connection) ->
-       case Name of
-               'Connection' -> response_connection_parse(Value);
-               Name when is_atom(Name) -> response_connection(Tail, Connection);
-               Name ->
-                       Name2 = cowboy_bstr:to_lower(Name),
-                       case Name2 of
-                               <<"connection">> -> response_connection_parse(Value);
-                               _Any -> response_connection(Tail, Connection)
-                       end
-       end.
-
--spec response_connection_parse(binary()) -> keepalive | close.
-response_connection_parse(ReplyConn) ->
-       Tokens = cowboy_http:nonempty_list(ReplyConn, fun cowboy_http:token/2),
-       cowboy_http:connection_to_atom(Tokens).
-
--spec response_head(cowboy_http:status(), cowboy_http:headers(),
-       cowboy_http:headers(), cowboy_http:headers()) -> iolist().
-response_head(Status, Headers, RespHeaders, DefaultHeaders) ->
-       StatusLine = <<"HTTP/1.1 ", (status(Status))/binary, "\r\n">>,
-       Headers2 = [{header_to_binary(Key), Value} || {Key, Value} <- Headers],
-       Headers3 = merge_headers(
-               merge_headers(Headers2, RespHeaders),
-               DefaultHeaders),
-       Headers4 = [[Key, <<": ">>, Value, <<"\r\n">>]
-               || {Key, Value} <- Headers3],
-       [StatusLine, Headers4, <<"\r\n">>].
-
--spec merge_headers(cowboy_http:headers(), cowboy_http:headers())
-       -> cowboy_http:headers().
-merge_headers(Headers, []) ->
-       Headers;
-merge_headers(Headers, [{Name, Value}|Tail]) ->
-       Headers2 = case lists:keymember(Name, 1, Headers) of
-               true -> Headers;
-               false -> Headers ++ [{Name, Value}]
-       end,
-       merge_headers(Headers2, Tail).
-
--spec atom_to_connection(keepalive) -> <<_:80>>;
-                                               (close) -> <<_:40>>.
-atom_to_connection(keepalive) ->
-       <<"keep-alive">>;
-atom_to_connection(close) ->
-       <<"close">>.
-
--spec status(cowboy_http:status()) -> binary().
-status(100) -> <<"100 Continue">>;
-status(101) -> <<"101 Switching Protocols">>;
-status(102) -> <<"102 Processing">>;
-status(200) -> <<"200 OK">>;
-status(201) -> <<"201 Created">>;
-status(202) -> <<"202 Accepted">>;
-status(203) -> <<"203 Non-Authoritative Information">>;
-status(204) -> <<"204 No Content">>;
-status(205) -> <<"205 Reset Content">>;
-status(206) -> <<"206 Partial Content">>;
-status(207) -> <<"207 Multi-Status">>;
-status(226) -> <<"226 IM Used">>;
-status(300) -> <<"300 Multiple Choices">>;
-status(301) -> <<"301 Moved Permanently">>;
-status(302) -> <<"302 Found">>;
-status(303) -> <<"303 See Other">>;
-status(304) -> <<"304 Not Modified">>;
-status(305) -> <<"305 Use Proxy">>;
-status(306) -> <<"306 Switch Proxy">>;
-status(307) -> <<"307 Temporary Redirect">>;
-status(400) -> <<"400 Bad Request">>;
-status(401) -> <<"401 Unauthorized">>;
-status(402) -> <<"402 Payment Required">>;
-status(403) -> <<"403 Forbidden">>;
-status(404) -> <<"404 Not Found">>;
-status(405) -> <<"405 Method Not Allowed">>;
-status(406) -> <<"406 Not Acceptable">>;
-status(407) -> <<"407 Proxy Authentication Required">>;
-status(408) -> <<"408 Request Timeout">>;
-status(409) -> <<"409 Conflict">>;
-status(410) -> <<"410 Gone">>;
-status(411) -> <<"411 Length Required">>;
-status(412) -> <<"412 Precondition Failed">>;
-status(413) -> <<"413 Request Entity Too Large">>;
-status(414) -> <<"414 Request-URI Too Long">>;
-status(415) -> <<"415 Unsupported Media Type">>;
-status(416) -> <<"416 Requested Range Not Satisfiable">>;
-status(417) -> <<"417 Expectation Failed">>;
-status(418) -> <<"418 I'm a teapot">>;
-status(422) -> <<"422 Unprocessable Entity">>;
-status(423) -> <<"423 Locked">>;
-status(424) -> <<"424 Failed Dependency">>;
-status(425) -> <<"425 Unordered Collection">>;
-status(426) -> <<"426 Upgrade Required">>;
-status(500) -> <<"500 Internal Server Error">>;
-status(501) -> <<"501 Not Implemented">>;
-status(502) -> <<"502 Bad Gateway">>;
-status(503) -> <<"503 Service Unavailable">>;
-status(504) -> <<"504 Gateway Timeout">>;
-status(505) -> <<"505 HTTP Version Not Supported">>;
-status(506) -> <<"506 Variant Also Negotiates">>;
-status(507) -> <<"507 Insufficient Storage">>;
-status(510) -> <<"510 Not Extended">>;
-status(B) when is_binary(B) -> B.
-
--spec header_to_binary(cowboy_http:header()) -> binary().
-header_to_binary('Cache-Control') -> <<"Cache-Control">>;
-header_to_binary('Connection') -> <<"Connection">>;
-header_to_binary('Date') -> <<"Date">>;
-header_to_binary('Pragma') -> <<"Pragma">>;
-header_to_binary('Transfer-Encoding') -> <<"Transfer-Encoding">>;
-header_to_binary('Upgrade') -> <<"Upgrade">>;
-header_to_binary('Via') -> <<"Via">>;
-header_to_binary('Accept') -> <<"Accept">>;
-header_to_binary('Accept-Charset') -> <<"Accept-Charset">>;
-header_to_binary('Accept-Encoding') -> <<"Accept-Encoding">>;
-header_to_binary('Accept-Language') -> <<"Accept-Language">>;
-header_to_binary('Authorization') -> <<"Authorization">>;
-header_to_binary('From') -> <<"From">>;
-header_to_binary('Host') -> <<"Host">>;
-header_to_binary('If-Modified-Since') -> <<"If-Modified-Since">>;
-header_to_binary('If-Match') -> <<"If-Match">>;
-header_to_binary('If-None-Match') -> <<"If-None-Match">>;
-header_to_binary('If-Range') -> <<"If-Range">>;
-header_to_binary('If-Unmodified-Since') -> <<"If-Unmodified-Since">>;
-header_to_binary('Max-Forwards') -> <<"Max-Forwards">>;
-header_to_binary('Proxy-Authorization') -> <<"Proxy-Authorization">>;
-header_to_binary('Range') -> <<"Range">>;
-header_to_binary('Referer') -> <<"Referer">>;
-header_to_binary('User-Agent') -> <<"User-Agent">>;
-header_to_binary('Age') -> <<"Age">>;
-header_to_binary('Location') -> <<"Location">>;
-header_to_binary('Proxy-Authenticate') -> <<"Proxy-Authenticate">>;
-header_to_binary('Public') -> <<"Public">>;
-header_to_binary('Retry-After') -> <<"Retry-After">>;
-header_to_binary('Server') -> <<"Server">>;
-header_to_binary('Vary') -> <<"Vary">>;
-header_to_binary('Warning') -> <<"Warning">>;
-header_to_binary('Www-Authenticate') -> <<"Www-Authenticate">>;
-header_to_binary('Allow') -> <<"Allow">>;
-header_to_binary('Content-Base') -> <<"Content-Base">>;
-header_to_binary('Content-Encoding') -> <<"Content-Encoding">>;
-header_to_binary('Content-Language') -> <<"Content-Language">>;
-header_to_binary('Content-Length') -> <<"Content-Length">>;
-header_to_binary('Content-Location') -> <<"Content-Location">>;
-header_to_binary('Content-Md5') -> <<"Content-Md5">>;
-header_to_binary('Content-Range') -> <<"Content-Range">>;
-header_to_binary('Content-Type') -> <<"Content-Type">>;
-header_to_binary('Etag') -> <<"Etag">>;
-header_to_binary('Expires') -> <<"Expires">>;
-header_to_binary('Last-Modified') -> <<"Last-Modified">>;
-header_to_binary('Accept-Ranges') -> <<"Accept-Ranges">>;
-header_to_binary('Set-Cookie') -> <<"Set-Cookie">>;
-header_to_binary('Set-Cookie2') -> <<"Set-Cookie2">>;
-header_to_binary('X-Forwarded-For') -> <<"X-Forwarded-For">>;
-header_to_binary('Cookie') -> <<"Cookie">>;
-header_to_binary('Keep-Alive') -> <<"Keep-Alive">>;
-header_to_binary('Proxy-Connection') -> <<"Proxy-Connection">>;
-header_to_binary(B) when is_binary(B) -> B.
-
-%% Tests.
-
--ifdef(TEST).
-
-parse_qs_test_() ->
-       %% {Qs, Result}
-       Tests = [
-               {<<"">>, []},
-               {<<"a=b">>, [{<<"a">>, <<"b">>}]},
-               {<<"aaa=bbb">>, [{<<"aaa">>, <<"bbb">>}]},
-               {<<"a&b">>, [{<<"a">>, true}, {<<"b">>, true}]},
-               {<<"a=b&c&d=e">>, [{<<"a">>, <<"b">>},
-                       {<<"c">>, true}, {<<"d">>, <<"e">>}]},
-               {<<"a=b=c=d=e&f=g">>, [{<<"a">>, <<"b=c=d=e">>}, {<<"f">>, <<"g">>}]},
-               {<<"a+b=c+d">>, [{<<"a b">>, <<"c d">>}]}
-       ],
-       URLDecode = fun cowboy_http:urldecode/1,
-       [{Qs, fun() -> R = parse_qs(Qs, URLDecode) end} || {Qs, R} <- Tests].
-
--endif.
index a26d446ababd9c0ddc2dc2083c81ea40d1ba9f08..b76c9a5bc7ba485d145bef97836c9fd7dc9dd270 100644 (file)
@@ -227,6 +227,9 @@ INCLUDE_DIRS+=$(UPSTREAM_INCLUDE_DIRS)
 
 define package_rules
 
+# We use --no-backup-if-mismatch to prevent .orig files ending up in
+# source builds and causing warnings on Debian if the patches have
+# fuzz.
 ifdef UPSTREAM_GIT
 $(CLONE_DIR)/.done:
        rm -rf $(CLONE_DIR)
@@ -234,7 +237,8 @@ $(CLONE_DIR)/.done:
        # Work around weird github breakage (bug 25264)
        cd $(CLONE_DIR) && git pull
        $(if $(UPSTREAM_REVISION),cd $(CLONE_DIR) && git checkout $(UPSTREAM_REVISION))
-       $(if $(WRAPPER_PATCHES),$(foreach F,$(WRAPPER_PATCHES),patch -d $(CLONE_DIR) -p1 <$(PACKAGE_DIR)/$(F) &&) :)
+       $(if $(WRAPPER_PATCHES),$(foreach F,$(WRAPPER_PATCHES),patch -E -z .umbrella-orig -d $(CLONE_DIR) -p1 <$(PACKAGE_DIR)/$(F) &&) :)
+       find $(CLONE_DIR) -name "*.umbrella-orig" -delete
        touch $$@
 endif # UPSTREAM_GIT
 
@@ -242,7 +246,8 @@ ifdef UPSTREAM_HG
 $(CLONE_DIR)/.done:
        rm -rf $(CLONE_DIR)
        hg clone -r $(or $(UPSTREAM_REVISION),default) $(UPSTREAM_HG) $(CLONE_DIR)
-       $(if $(WRAPPER_PATCHES),$(foreach F,$(WRAPPER_PATCHES),patch -d $(CLONE_DIR) -p1 <$(PACKAGE_DIR)/$(F) &&) :)
+       $(if $(WRAPPER_PATCHES),$(foreach F,$(WRAPPER_PATCHES),patch -E -z .umbrella-orig -d $(CLONE_DIR) -p1 <$(PACKAGE_DIR)/$(F) &&) :)
+       find $(CLONE_DIR) -name "*.umbrella-orig" -delete
        touch $$@
 endif # UPSTREAM_HG
 
@@ -299,16 +304,14 @@ define run_broker
        cp -p $(PACKAGE_DIR)/dist/*.ez $(TEST_TMPDIR)/plugins
        $(call copy,$(3),$(TEST_TMPDIR)/plugins)
        rm -f $(TEST_TMPDIR)/plugins/rabbit_common*.ez
-       for plugin in \
-         $$$$(RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
-            RABBITMQ_ENABLED_PLUGINS_FILE=$(TEST_TMPDIR)/enabled_plugins \
-           $(UMBRELLA_BASE_DIR)/rabbitmq-server/scripts/rabbitmq-plugins list -m); do \
-           RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
+       RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
            RABBITMQ_ENABLED_PLUGINS_FILE=$(TEST_TMPDIR)/enabled_plugins \
            $(UMBRELLA_BASE_DIR)/rabbitmq-server/scripts/rabbitmq-plugins \
-           enable $$$$plugin; \
-       done
-       RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
+           set --offline $$$$(RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
+            RABBITMQ_ENABLED_PLUGINS_FILE=$(TEST_TMPDIR)/enabled_plugins \
+           $(UMBRELLA_BASE_DIR)/rabbitmq-server/scripts/rabbitmq-plugins list -m | tr '\n' ' ')
+       MAKE="$(MAKE)" \
+         RABBITMQ_PLUGINS_DIR=$(TEST_TMPDIR)/plugins \
          RABBITMQ_ENABLED_PLUGINS_FILE=$(TEST_TMPDIR)/enabled_plugins \
          RABBITMQ_LOG_BASE=$(TEST_TMPDIR)/log \
          RABBITMQ_MNESIA_BASE=$(TEST_TMPDIR)/$(NODENAME) \
@@ -337,13 +340,14 @@ define run_with_broker_tests_aux
                | $(ERL_CALL) $(ERL_CALL_OPTS) \
                | tee -a $(TEST_TMPDIR)/rabbit-test-output \
                | egrep "{ok, (ok|passed)}" >/dev/null &&) \
-           $(foreach SCRIPT,$(WITH_BROKER_TEST_SCRIPTS),$(SCRIPT) &&) : ; \
+           MAKE="$(MAKE)" RABBITMQ_NODENAME="$(NODENAME)" \
+             $(foreach SCRIPT,$(WITH_BROKER_TEST_SCRIPTS),$(SCRIPT) &&) : ; \
         then \
          touch $(TEST_TMPDIR)/.passed ; \
-         echo "\nPASSED\n" ; \
+         printf "\nPASSED\n" ; \
        else \
          cat $(TEST_TMPDIR)/rabbit-test-output ; \
-         echo "\n\nFAILED\n" ; \
+         printf "\n\nFAILED\n" ; \
        fi
        sleep 1
        echo "rabbit_misc:report_cover(), init:stop()." | $(ERL_CALL) $(ERL_CALL_OPTS)
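The run_broker change above replaces the per-plugin "enable" loop with a single "rabbitmq-plugins set --offline" call fed by the output of "list -m". A minimal Python sketch of the same idea, assuming rabbitmq-plugins is on PATH and using hypothetical test paths (the Makefile derives the real ones from TEST_TMPDIR):

    import os
    import subprocess

    # Hypothetical locations standing in for $(TEST_TMPDIR)/plugins etc.
    env = dict(os.environ,
               RABBITMQ_PLUGINS_DIR="/tmp/rabbitmq-test/plugins",
               RABBITMQ_ENABLED_PLUGINS_FILE="/tmp/rabbitmq-test/enabled_plugins")

    # "list -m" prints one available plugin name per line.
    listed = subprocess.run(["rabbitmq-plugins", "list", "-m"],
                            env=env, capture_output=True, text=True, check=True)
    plugins = listed.stdout.split()

    # One "set --offline" call enables exactly this list in a single write,
    # instead of invoking "enable" once per plugin as the old loop did.
    subprocess.run(["rabbitmq-plugins", "set", "--offline", *plugins],
                   env=env, check=True)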
diff --git a/rabbitmq-server/plugins-src/eldap-wrapper/CONTRIBUTING.md b/rabbitmq-server/plugins-src/eldap-wrapper/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/rabbitmq-server/plugins-src/mochiweb-wrapper/CONTRIBUTING.md b/rabbitmq-server/plugins-src/mochiweb-wrapper/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/rabbitmq-server/plugins-src/mochiweb-wrapper/mochiweb-git/src/mochiweb_request.erl.orig b/rabbitmq-server/plugins-src/mochiweb-wrapper/mochiweb-git/src/mochiweb_request.erl.orig
deleted file mode 100644 (file)
index 0fea1eb..0000000
+++ /dev/null
@@ -1,857 +0,0 @@
-%% @author Bob Ippolito <bob@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc MochiWeb HTTP Request abstraction.
-
--module(mochiweb_request).
--author('bob@mochimedia.com').
-
--include_lib("kernel/include/file.hrl").
--include("internal.hrl").
-
--define(QUIP, "Any of you quaids got a smint?").
-
--export([new/5]).
--export([get_header_value/2, get_primary_header_value/2, get_combined_header_value/2, get/2, dump/1]).
--export([send/2, recv/2, recv/3, recv_body/1, recv_body/2, stream_body/4]).
--export([start_response/2, start_response_length/2, start_raw_response/2]).
--export([respond/2, ok/2]).
--export([not_found/1, not_found/2]).
--export([parse_post/1, parse_qs/1]).
--export([should_close/1, cleanup/1]).
--export([parse_cookie/1, get_cookie_value/2]).
--export([serve_file/3, serve_file/4]).
--export([accepted_encodings/2]).
--export([accepts_content_type/2, accepted_content_types/2]).
-
--define(SAVE_QS, mochiweb_request_qs).
--define(SAVE_PATH, mochiweb_request_path).
--define(SAVE_RECV, mochiweb_request_recv).
--define(SAVE_BODY, mochiweb_request_body).
--define(SAVE_BODY_LENGTH, mochiweb_request_body_length).
--define(SAVE_POST, mochiweb_request_post).
--define(SAVE_COOKIE, mochiweb_request_cookie).
--define(SAVE_FORCE_CLOSE, mochiweb_request_force_close).
-
-%% @type key() = atom() | string() | binary()
-%% @type value() = atom() | string() | binary() | integer()
-%% @type headers(). A mochiweb_headers structure.
-%% @type request(). A mochiweb_request parameterized module instance.
-%% @type response(). A mochiweb_response parameterized module instance.
-%% @type ioheaders() = headers() | [{key(), value()}].
-
-% 5 minute default idle timeout
--define(IDLE_TIMEOUT, 300000).
-
-% Maximum recv_body() length of 1MB
--define(MAX_RECV_BODY, (1024*1024)).
-
-%% @spec new(Socket, Method, RawPath, Version, headers()) -> request()
-%% @doc Create a new request instance.
-new(Socket, Method, RawPath, Version, Headers) ->
-    {?MODULE, [Socket, Method, RawPath, Version, Headers]}.
-
-%% @spec get_header_value(K, request()) -> undefined | Value
-%% @doc Get the value of a given request header.
-get_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
-    mochiweb_headers:get_value(K, Headers).
-
-get_primary_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
-    mochiweb_headers:get_primary_value(K, Headers).
-
-get_combined_header_value(K, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
-    mochiweb_headers:get_combined_value(K, Headers).
-
-%% @type field() = socket | scheme | method | raw_path | version | headers | peer | path | body_length | range
-
-%% @spec get(field(), request()) -> term()
-%% @doc Return the internal representation of the given field. If
-%%      <code>socket</code> is requested on a HTTPS connection, then
-%%      an ssl socket will be returned as <code>{ssl, SslSocket}</code>.
-%%      You can use <code>SslSocket</code> with the <code>ssl</code>
-%%      application, eg: <code>ssl:peercert(SslSocket)</code>.
-get(socket, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    Socket;
-get(scheme, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    case mochiweb_socket:type(Socket) of
-        plain ->
-            http;
-        ssl ->
-            https
-    end;
-get(method, {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}) ->
-    Method;
-get(raw_path, {?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
-    RawPath;
-get(version, {?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}) ->
-    Version;
-get(headers, {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}) ->
-    Headers;
-get(peer, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case mochiweb_socket:peername(Socket) of
-        {ok, {Addr={10, _, _, _}, _Port}} ->
-            case get_header_value("x-forwarded-for", THIS) of
-                undefined ->
-                    inet_parse:ntoa(Addr);
-                Hosts ->
-                    string:strip(lists:last(string:tokens(Hosts, ",")))
-            end;
-        {ok, {{127, 0, 0, 1}, _Port}} ->
-            case get_header_value("x-forwarded-for", THIS) of
-                undefined ->
-                    "127.0.0.1";
-                Hosts ->
-                    string:strip(lists:last(string:tokens(Hosts, ",")))
-            end;
-        {ok, {Addr, _Port}} ->
-            inet_parse:ntoa(Addr);
-        {error, enotconn} ->
-            exit(normal)
-    end;
-get(path, {?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
-    case erlang:get(?SAVE_PATH) of
-        undefined ->
-            {Path0, _, _} = mochiweb_util:urlsplit_path(RawPath),
-            Path = mochiweb_util:unquote(Path0),
-            put(?SAVE_PATH, Path),
-            Path;
-        Cached ->
-            Cached
-    end;
-get(body_length, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case erlang:get(?SAVE_BODY_LENGTH) of
-        undefined ->
-            BodyLength = body_length(THIS),
-            put(?SAVE_BODY_LENGTH, {cached, BodyLength}),
-            BodyLength;
-        {cached, Cached} ->
-            Cached
-    end;
-get(range, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case get_header_value(range, THIS) of
-        undefined ->
-            undefined;
-        RawRange ->
-            mochiweb_http:parse_range_request(RawRange)
-    end.
-
-%% @spec dump(request()) -> {mochiweb_request, [{atom(), term()}]}
-%% @doc Dump the internal representation to a "human readable" set of terms
-%%      for debugging/inspection purposes.
-dump({?MODULE, [_Socket, Method, RawPath, Version, Headers]}) ->
-    {?MODULE, [{method, Method},
-               {version, Version},
-               {raw_path, RawPath},
-               {headers, mochiweb_headers:to_list(Headers)}]}.
-
-%% @spec send(iodata(), request()) -> ok
-%% @doc Send data over the socket.
-send(Data, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    case mochiweb_socket:send(Socket, Data) of
-        ok ->
-            ok;
-        _ ->
-            exit(normal)
-    end.
-
-%% @spec recv(integer(), request()) -> binary()
-%% @doc Receive Length bytes from the client as a binary, with the default
-%%      idle timeout.
-recv(Length, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    recv(Length, ?IDLE_TIMEOUT, THIS).
-
-%% @spec recv(integer(), integer(), request()) -> binary()
-%% @doc Receive Length bytes from the client as a binary, with the given
-%%      Timeout in msec.
-recv(Length, Timeout, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    case mochiweb_socket:recv(Socket, Length, Timeout) of
-        {ok, Data} ->
-            put(?SAVE_RECV, true),
-            Data;
-        _ ->
-            exit(normal)
-    end.
-
-%% @spec body_length(request()) -> undefined | chunked | unknown_transfer_encoding | integer()
-%% @doc  Infer body length from transfer-encoding and content-length headers.
-body_length({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case get_header_value("transfer-encoding", THIS) of
-        undefined ->
-            case get_combined_header_value("content-length", THIS) of
-                undefined ->
-                    undefined;
-                Length ->
-                    list_to_integer(Length)
-            end;
-        "chunked" ->
-            chunked;
-        Unknown ->
-            {unknown_transfer_encoding, Unknown}
-    end.
-
-
-%% @spec recv_body(request()) -> binary()
-%% @doc Receive the body of the HTTP request (defined by Content-Length).
-%%      Will only receive up to the default max-body length of 1MB.
-recv_body({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    recv_body(?MAX_RECV_BODY, THIS).
-
-%% @spec recv_body(integer(), request()) -> binary()
-%% @doc Receive the body of the HTTP request (defined by Content-Length).
-%%      Will receive up to MaxBody bytes.
-recv_body(MaxBody, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case erlang:get(?SAVE_BODY) of
-        undefined ->
-            % we could use a sane constant for max chunk size
-            Body = stream_body(?MAX_RECV_BODY, fun
-                ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
-                    iolist_to_binary(lists:reverse(BinAcc));
-                ({Length, Bin}, {LengthAcc, BinAcc}) ->
-                    NewLength = Length + LengthAcc,
-                    if NewLength > MaxBody ->
-                        exit({body_too_large, chunked});
-                    true ->
-                        {NewLength, [Bin | BinAcc]}
-                    end
-                end, {0, []}, MaxBody, THIS),
-            put(?SAVE_BODY, Body),
-            Body;
-        Cached -> Cached
-    end.
-
-stream_body(MaxChunkSize, ChunkFun, FunState, {?MODULE,[_Socket,_Method,_RawPath,_Version,_Headers]}=THIS) ->
-    stream_body(MaxChunkSize, ChunkFun, FunState, undefined, THIS).
-
-stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength,
-            {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    Expect = case get_header_value("expect", THIS) of
-                 undefined ->
-                     undefined;
-                 Value when is_list(Value) ->
-                     string:to_lower(Value)
-             end,
-    case Expect of
-        "100-continue" ->
-            _ = start_raw_response({100, gb_trees:empty()}, THIS),
-            ok;
-        _Else ->
-            ok
-    end,
-    case body_length(THIS) of
-        undefined ->
-            undefined;
-        {unknown_transfer_encoding, Unknown} ->
-            exit({unknown_transfer_encoding, Unknown});
-        chunked ->
-            % In this case the MaxBody is actually used to
-            % determine the maximum allowed size of a single
-            % chunk.
-            stream_chunked_body(MaxChunkSize, ChunkFun, FunState, THIS);
-        0 ->
-            <<>>;
-        Length when is_integer(Length) ->
-            case MaxBodyLength of
-            MaxBodyLength when is_integer(MaxBodyLength), MaxBodyLength < Length ->
-                exit({body_too_large, content_length});
-            _ ->
-                stream_unchunked_body(Length, ChunkFun, FunState, THIS)
-            end
-    end.
-
-
-%% @spec start_response({integer(), ioheaders()}, request()) -> response()
-%% @doc Start the HTTP response by sending the Code HTTP response and
-%%      ResponseHeaders. The server will set header defaults such as Server
-%%      and Date if not present in ResponseHeaders.
-start_response({Code, ResponseHeaders}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    HResponse1 = mochiweb_headers:default_from_list(server_headers(),
-                                                    HResponse),
-    start_raw_response({Code, HResponse1}, THIS).
-
-%% @spec start_raw_response({integer(), headers()}, request()) -> response()
-%% @doc Start the HTTP response by sending the Code HTTP response and
-%%      ResponseHeaders.
-start_raw_response({Code, ResponseHeaders}, {?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}=THIS) ->
-    F = fun ({K, V}, Acc) ->
-                [mochiweb_util:make_io(K), <<": ">>, V, <<"\r\n">> | Acc]
-        end,
-    End = lists:foldl(F, [<<"\r\n">>],
-                      mochiweb_headers:to_list(ResponseHeaders)),
-    send([make_version(Version), make_code(Code), <<"\r\n">> | End], THIS),
-    mochiweb:new_response({THIS, Code, ResponseHeaders}).
-
-
-%% @spec start_response_length({integer(), ioheaders(), integer()}, request()) -> response()
-%% @doc Start the HTTP response by sending the Code HTTP response and
-%%      ResponseHeaders including a Content-Length of Length. The server
-%%      will set header defaults such as Server
-%%      and Date if not present in ResponseHeaders.
-start_response_length({Code, ResponseHeaders, Length},
-                      {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    HResponse1 = mochiweb_headers:enter("Content-Length", Length, HResponse),
-    start_response({Code, HResponse1}, THIS).
-
-%% @spec respond({integer(), ioheaders(), iodata() | chunked | {file, IoDevice}}, request()) -> response()
-%% @doc Start the HTTP response with start_response, and send Body to the
-%%      client (if the get(method) /= 'HEAD'). The Content-Length header
-%%      will be set by the Body length, and the server will insert header
-%%      defaults.
-respond({Code, ResponseHeaders, {file, IoDevice}},
-        {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}=THIS) ->
-    Length = mochiweb_io:iodevice_size(IoDevice),
-    Response = start_response_length({Code, ResponseHeaders, Length}, THIS),
-    case Method of
-        'HEAD' ->
-            ok;
-        _ ->
-            mochiweb_io:iodevice_stream(
-              fun (Body) -> send(Body, THIS) end,
-              IoDevice)
-    end,
-    Response;
-respond({Code, ResponseHeaders, chunked}, {?MODULE, [_Socket, Method, _RawPath, Version, _Headers]}=THIS) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    HResponse1 = case Method of
-                     'HEAD' ->
-                         %% This is what Google does, http://www.google.com/
-                         %% is chunked but HEAD gets Content-Length: 0.
-                         %% The RFC is ambiguous so emulating Google is smart.
-                         mochiweb_headers:enter("Content-Length", "0",
-                                                HResponse);
-                     _ when Version >= {1, 1} ->
-                         %% Only use chunked encoding for HTTP/1.1
-                         mochiweb_headers:enter("Transfer-Encoding", "chunked",
-                                                HResponse);
-                     _ ->
-                         %% For pre-1.1 clients we send the data as-is
-                         %% without a Content-Length header and without
-                         %% chunk delimiters. Since the end of the document
-                         %% is now ambiguous we must force a close.
-                         put(?SAVE_FORCE_CLOSE, true),
-                         HResponse
-                 end,
-    start_response({Code, HResponse1}, THIS);
-respond({Code, ResponseHeaders, Body}, {?MODULE, [_Socket, Method, _RawPath, _Version, _Headers]}=THIS) ->
-    Response = start_response_length({Code, ResponseHeaders, iolist_size(Body)}, THIS),
-    case Method of
-        'HEAD' ->
-            ok;
-        _ ->
-            send(Body, THIS)
-    end,
-    Response.
-
-%% @spec not_found(request()) -> response()
-%% @doc Alias for <code>not_found([])</code>.
-not_found({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    not_found([], THIS).
-
-%% @spec not_found(ExtraHeaders, request()) -> response()
-%% @doc Alias for <code>respond({404, [{"Content-Type", "text/plain"}
-%% | ExtraHeaders], &lt;&lt;"Not found."&gt;&gt;})</code>.
-not_found(ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    respond({404, [{"Content-Type", "text/plain"} | ExtraHeaders],
-             <<"Not found.">>}, THIS).
-
-%% @spec ok({value(), iodata()} | {value(), ioheaders(), iodata() | {file, IoDevice}}, request()) ->
-%%           response()
-%% @doc respond({200, [{"Content-Type", ContentType} | Headers], Body}).
-ok({ContentType, Body}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    ok({ContentType, [], Body}, THIS);
-ok({ContentType, ResponseHeaders, Body}, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    HResponse = mochiweb_headers:make(ResponseHeaders),
-    case THIS:get(range) of
-        X when (X =:= undefined orelse X =:= fail) orelse Body =:= chunked ->
-            %% http://code.google.com/p/mochiweb/issues/detail?id=54
-            %% Range header not supported when chunked, return 200 and provide
-            %% full response.
-            HResponse1 = mochiweb_headers:enter("Content-Type", ContentType,
-                                                HResponse),
-            respond({200, HResponse1, Body}, THIS);
-        Ranges ->
-            {PartList, Size} = range_parts(Body, Ranges),
-            case PartList of
-                [] -> %% no valid ranges
-                    HResponse1 = mochiweb_headers:enter("Content-Type",
-                                                        ContentType,
-                                                        HResponse),
-                    %% could be 416, for now we'll just return 200
-                    respond({200, HResponse1, Body}, THIS);
-                PartList ->
-                    {RangeHeaders, RangeBody} =
-                        mochiweb_multipart:parts_to_body(PartList, ContentType, Size),
-                    HResponse1 = mochiweb_headers:enter_from_list(
-                                   [{"Accept-Ranges", "bytes"} |
-                                    RangeHeaders],
-                                   HResponse),
-                    respond({206, HResponse1, RangeBody}, THIS)
-            end
-    end.
-
-%% @spec should_close(request()) -> bool()
-%% @doc Return true if the connection must be closed. If false, using
-%%      Keep-Alive should be safe.
-should_close({?MODULE, [_Socket, _Method, _RawPath, Version, _Headers]}=THIS) ->
-    ForceClose = erlang:get(?SAVE_FORCE_CLOSE) =/= undefined,
-    DidNotRecv = erlang:get(?SAVE_RECV) =:= undefined,
-    ForceClose orelse Version < {1, 0}
-        %% Connection: close
-        orelse is_close(get_header_value("connection", THIS))
-        %% HTTP 1.0 requires Connection: Keep-Alive
-        orelse (Version =:= {1, 0}
-                andalso get_header_value("connection", THIS) =/= "Keep-Alive")
-        %% unread data left on the socket, can't safely continue
-        orelse (DidNotRecv
-                andalso get_combined_header_value("content-length", THIS) =/= undefined
-                andalso list_to_integer(get_combined_header_value("content-length", THIS)) > 0)
-        orelse (DidNotRecv
-                andalso get_header_value("transfer-encoding", THIS) =:= "chunked").
-
-is_close("close") ->
-    true;
-is_close(S=[_C, _L, _O, _S, _E]) ->
-    string:to_lower(S) =:= "close";
-is_close(_) ->
-    false.
-
-%% @spec cleanup(request()) -> ok
-%% @doc Clean up any junk in the process dictionary, required before continuing
-%%      a Keep-Alive request.
-cleanup({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    L = [?SAVE_QS, ?SAVE_PATH, ?SAVE_RECV, ?SAVE_BODY, ?SAVE_BODY_LENGTH,
-         ?SAVE_POST, ?SAVE_COOKIE, ?SAVE_FORCE_CLOSE],
-    lists:foreach(fun(K) ->
-                          erase(K)
-                  end, L),
-    ok.
-
-%% @spec parse_qs(request()) -> [{Key::string(), Value::string()}]
-%% @doc Parse the query string of the URL.
-parse_qs({?MODULE, [_Socket, _Method, RawPath, _Version, _Headers]}) ->
-    case erlang:get(?SAVE_QS) of
-        undefined ->
-            {_, QueryString, _} = mochiweb_util:urlsplit_path(RawPath),
-            Parsed = mochiweb_util:parse_qs(QueryString),
-            put(?SAVE_QS, Parsed),
-            Parsed;
-        Cached ->
-            Cached
-    end.
-
-%% @spec get_cookie_value(Key::string, request()) -> string() | undefined
-%% @doc Get the value of the given cookie.
-get_cookie_value(Key, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    proplists:get_value(Key, parse_cookie(THIS)).
-
-%% @spec parse_cookie(request()) -> [{Key::string(), Value::string()}]
-%% @doc Parse the cookie header.
-parse_cookie({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case erlang:get(?SAVE_COOKIE) of
-        undefined ->
-            Cookies = case get_header_value("cookie", THIS) of
-                          undefined ->
-                              [];
-                          Value ->
-                              mochiweb_cookies:parse_cookie(Value)
-                      end,
-            put(?SAVE_COOKIE, Cookies),
-            Cookies;
-        Cached ->
-            Cached
-    end.
-
-%% @spec parse_post(request()) -> [{Key::string(), Value::string()}]
-%% @doc Parse an application/x-www-form-urlencoded form POST. This
-%%      has the side-effect of calling recv_body().
-parse_post({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case erlang:get(?SAVE_POST) of
-        undefined ->
-            Parsed = case recv_body(THIS) of
-                         undefined ->
-                             [];
-                         Binary ->
-                             case get_primary_header_value("content-type",THIS) of
-                                 "application/x-www-form-urlencoded" ++ _ ->
-                                     mochiweb_util:parse_qs(Binary);
-                                 _ ->
-                                     []
-                             end
-                     end,
-            put(?SAVE_POST, Parsed),
-            Parsed;
-        Cached ->
-            Cached
-    end.
-
-%% @spec stream_chunked_body(integer(), fun(), term(), request()) -> term()
-%% @doc The function is called for each chunk.
-%%      Used internally by read_chunked_body.
-stream_chunked_body(MaxChunkSize, Fun, FunState,
-                    {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case read_chunk_length(THIS) of
-        0 ->
-            Fun({0, read_chunk(0, THIS)}, FunState);
-        Length when Length > MaxChunkSize ->
-            NewState = read_sub_chunks(Length, MaxChunkSize, Fun, FunState, THIS),
-            stream_chunked_body(MaxChunkSize, Fun, NewState, THIS);
-        Length ->
-            NewState = Fun({Length, read_chunk(Length, THIS)}, FunState),
-            stream_chunked_body(MaxChunkSize, Fun, NewState, THIS)
-    end.
-
-stream_unchunked_body(0, Fun, FunState, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    Fun({0, <<>>}, FunState);
-stream_unchunked_body(Length, Fun, FunState,
-                      {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) when Length > 0 ->
-    PktSize = case Length > ?RECBUF_SIZE of
-        true ->
-            ?RECBUF_SIZE;
-        false ->
-            Length
-    end,
-    Bin = recv(PktSize, THIS),
-    NewState = Fun({PktSize, Bin}, FunState),
-    stream_unchunked_body(Length - PktSize, Fun, NewState, THIS).
-
-%% @spec read_chunk_length(request()) -> integer()
-%% @doc Read the length of the next HTTP chunk.
-read_chunk_length({?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    ok = mochiweb_socket:setopts(Socket, [{packet, line}]),
-    case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
-        {ok, Header} ->
-            ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
-            Splitter = fun (C) ->
-                               C =/= $\r andalso C =/= $\n andalso C =/= $
-                       end,
-            {Hex, _Rest} = lists:splitwith(Splitter, binary_to_list(Header)),
-            mochihex:to_int(Hex);
-        _ ->
-            exit(normal)
-    end.
-
-%% @spec read_chunk(integer(), request()) -> Chunk::binary() | [Footer::binary()]
-%% @doc Read in a HTTP chunk of the given length. If Length is 0, then read the
-%%      HTTP footers (as a list of binaries, since they're nominal).
-read_chunk(0, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    ok = mochiweb_socket:setopts(Socket, [{packet, line}]),
-    F = fun (F1, Acc) ->
-                case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
-                    {ok, <<"\r\n">>} ->
-                        Acc;
-                    {ok, Footer} ->
-                        F1(F1, [Footer | Acc]);
-                    _ ->
-                        exit(normal)
-                end
-        end,
-    Footers = F(F, []),
-    ok = mochiweb_socket:setopts(Socket, [{packet, raw}]),
-    put(?SAVE_RECV, true),
-    Footers;
-read_chunk(Length, {?MODULE, [Socket, _Method, _RawPath, _Version, _Headers]}) ->
-    case mochiweb_socket:recv(Socket, 2 + Length, ?IDLE_TIMEOUT) of
-        {ok, <<Chunk:Length/binary, "\r\n">>} ->
-            Chunk;
-        _ ->
-            exit(normal)
-    end.
-
-read_sub_chunks(Length, MaxChunkSize, Fun, FunState,
-                {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) when Length > MaxChunkSize ->
-    Bin = recv(MaxChunkSize, THIS),
-    NewState = Fun({size(Bin), Bin}, FunState),
-    read_sub_chunks(Length - MaxChunkSize, MaxChunkSize, Fun, NewState, THIS);
-
-read_sub_chunks(Length, _MaxChunkSize, Fun, FunState,
-                {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    Fun({Length, read_chunk(Length, THIS)}, FunState).
-
-%% @spec serve_file(Path, DocRoot, request()) -> Response
-%% @doc Serve a file relative to DocRoot.
-serve_file(Path, DocRoot, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    serve_file(Path, DocRoot, [], THIS).
-
-%% @spec serve_file(Path, DocRoot, ExtraHeaders, request()) -> Response
-%% @doc Serve a file relative to DocRoot.
-serve_file(Path, DocRoot, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case mochiweb_util:safe_relative_path(Path) of
-        undefined ->
-            not_found(ExtraHeaders, THIS);
-        RelPath ->
-            FullPath = filename:join([DocRoot, RelPath]),
-            case filelib:is_dir(FullPath) of
-                true ->
-                    maybe_redirect(RelPath, FullPath, ExtraHeaders, THIS);
-                false ->
-                    maybe_serve_file(FullPath, ExtraHeaders, THIS)
-            end
-    end.
-
-%% Internal API
-
-%% This has the same effect as the DirectoryIndex directive in httpd
-directory_index(FullPath) ->
-    filename:join([FullPath, "index.html"]).
-
-maybe_redirect([], FullPath, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    maybe_serve_file(directory_index(FullPath), ExtraHeaders, THIS);
-
-maybe_redirect(RelPath, FullPath, ExtraHeaders,
-               {?MODULE, [_Socket, _Method, _RawPath, _Version, Headers]}=THIS) ->
-    case string:right(RelPath, 1) of
-        "/" ->
-            maybe_serve_file(directory_index(FullPath), ExtraHeaders, THIS);
-        _   ->
-            Host = mochiweb_headers:get_value("host", Headers),
-            Location = "http://" ++ Host  ++ "/" ++ RelPath ++ "/",
-            LocationBin = list_to_binary(Location),
-            MoreHeaders = [{"Location", Location},
-                           {"Content-Type", "text/html"} | ExtraHeaders],
-            Top = <<"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">"
-            "<html><head>"
-            "<title>301 Moved Permanently</title>"
-            "</head><body>"
-            "<h1>Moved Permanently</h1>"
-            "<p>The document has moved <a href=\"">>,
-            Bottom = <<">here</a>.</p></body></html>\n">>,
-            Body = <<Top/binary, LocationBin/binary, Bottom/binary>>,
-            respond({301, MoreHeaders, Body}, THIS)
-    end.
-
-maybe_serve_file(File, ExtraHeaders, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case file:read_file_info(File) of
-        {ok, FileInfo} ->
-            LastModified = httpd_util:rfc1123_date(FileInfo#file_info.mtime),
-            case get_header_value("if-modified-since", THIS) of
-                LastModified ->
-                    respond({304, ExtraHeaders, ""}, THIS);
-                _ ->
-                    case file:open(File, [raw, binary]) of
-                        {ok, IoDevice} ->
-                            ContentType = mochiweb_util:guess_mime(File),
-                            Res = ok({ContentType,
-                                      [{"last-modified", LastModified}
-                                       | ExtraHeaders],
-                                      {file, IoDevice}}, THIS),
-                            ok = file:close(IoDevice),
-                            Res;
-                        _ ->
-                            not_found(ExtraHeaders, THIS)
-                    end
-            end;
-        {error, _} ->
-            not_found(ExtraHeaders, THIS)
-    end.
-
-server_headers() ->
-    [{"Server", "MochiWeb/1.0 (" ++ ?QUIP ++ ")"},
-     {"Date", httpd_util:rfc1123_date()}].
-
-make_code(X) when is_integer(X) ->
-    [integer_to_list(X), [" " | httpd_util:reason_phrase(X)]];
-make_code(Io) when is_list(Io); is_binary(Io) ->
-    Io.
-
-make_version({1, 0}) ->
-    <<"HTTP/1.0 ">>;
-make_version(_) ->
-    <<"HTTP/1.1 ">>.
-
-range_parts({file, IoDevice}, Ranges) ->
-    Size = mochiweb_io:iodevice_size(IoDevice),
-    F = fun (Spec, Acc) ->
-                case mochiweb_http:range_skip_length(Spec, Size) of
-                    invalid_range ->
-                        Acc;
-                    V ->
-                        [V | Acc]
-                end
-        end,
-    LocNums = lists:foldr(F, [], Ranges),
-    {ok, Data} = file:pread(IoDevice, LocNums),
-    Bodies = lists:zipwith(fun ({Skip, Length}, PartialBody) ->
-                                   {Skip, Skip + Length - 1, PartialBody}
-                           end,
-                           LocNums, Data),
-    {Bodies, Size};
-range_parts(Body0, Ranges) ->
-    Body = iolist_to_binary(Body0),
-    Size = size(Body),
-    F = fun(Spec, Acc) ->
-                case mochiweb_http:range_skip_length(Spec, Size) of
-                    invalid_range ->
-                        Acc;
-                    {Skip, Length} ->
-                        <<_:Skip/binary, PartialBody:Length/binary, _/binary>> = Body,
-                        [{Skip, Skip + Length - 1, PartialBody} | Acc]
-                end
-        end,
-    {lists:foldr(F, [], Ranges), Size}.
-
-%% @spec accepted_encodings([encoding()], request()) -> [encoding()] | bad_accept_encoding_value
-%% @type encoding() = string().
-%%
-%% @doc Returns a list of encodings accepted by a request. Encodings that are
-%%      not supported by the server will not be included in the return list.
-%%      This list is computed from the "Accept-Encoding" header and
-%%      its elements are ordered, descendingly, according to their Q values.
-%%
-%%      Section 14.3 of the RFC 2616 (HTTP 1.1) describes the "Accept-Encoding"
-%%      header and the process of determining which server supported encodings
-%%      can be used for encoding the body for the request's response.
-%%
-%%      Examples
-%%
-%%      1) For a missing "Accept-Encoding" header:
-%%         accepted_encodings(["gzip", "identity"]) -> ["identity"]
-%%
-%%      2) For an "Accept-Encoding" header with value "gzip, deflate":
-%%         accepted_encodings(["gzip", "identity"]) -> ["gzip", "identity"]
-%%
-%%      3) For an "Accept-Encoding" header with value "gzip;q=0.5, deflate":
-%%         accepted_encodings(["gzip", "deflate", "identity"]) ->
-%%            ["deflate", "gzip", "identity"]
-%%
-accepted_encodings(SupportedEncodings, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    AcceptEncodingHeader = case get_header_value("Accept-Encoding", THIS) of
-        undefined ->
-            "";
-        Value ->
-            Value
-    end,
-    case mochiweb_util:parse_qvalues(AcceptEncodingHeader) of
-        invalid_qvalue_string ->
-            bad_accept_encoding_value;
-        QList ->
-            mochiweb_util:pick_accepted_encodings(
-                QList, SupportedEncodings, "identity"
-            )
-    end.
-
-%% @spec accepts_content_type(string() | binary(), request()) -> boolean() | bad_accept_header
-%%
-%% @doc Determines whether a request accepts a given media type by analyzing its
-%%      "Accept" header.
-%%
-%%      Examples
-%%
-%%      1) For a missing "Accept" header:
-%%         accepts_content_type("application/json") -> true
-%%
-%%      2) For an "Accept" header with value "text/plain, application/*":
-%%         accepts_content_type("application/json") -> true
-%%
-%%      3) For an "Accept" header with value "text/plain, */*; q=0.0":
-%%         accepts_content_type("application/json") -> false
-%%
-%%      4) For an "Accept" header with value "text/plain; q=0.5, */*; q=0.1":
-%%         accepts_content_type("application/json") -> true
-%%
-%%      5) For an "Accept" header with value "text/*; q=0.0, */*":
-%%         accepts_content_type("text/plain") -> false
-%%
-accepts_content_type(ContentType1, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    ContentType = re:replace(ContentType1, "\\s", "", [global, {return, list}]),
-    AcceptHeader = accept_header(THIS),
-    case mochiweb_util:parse_qvalues(AcceptHeader) of
-        invalid_qvalue_string ->
-            bad_accept_header;
-        QList ->
-            [MainType, _SubType] = string:tokens(ContentType, "/"),
-            SuperType = MainType ++ "/*",
-            lists:any(
-                fun({"*/*", Q}) when Q > 0.0 ->
-                        true;
-                    ({Type, Q}) when Q > 0.0 ->
-                        Type =:= ContentType orelse Type =:= SuperType;
-                    (_) ->
-                        false
-                end,
-                QList
-            ) andalso
-            (not lists:member({ContentType, 0.0}, QList)) andalso
-            (not lists:member({SuperType, 0.0}, QList))
-    end.
-
-%% @spec accepted_content_types([string() | binary()], request()) -> [string()] | bad_accept_header
-%%
-%% @doc Filters which of the given media types this request accepts. This filtering
-%%      is performed by analyzing the "Accept" header. The returned list is sorted
-%%      according to the preferences specified in the "Accept" header (higher Q values
-%%      first). If two or more types have the same preference (Q value), they're order
-%%      in the returned list is the same as they're order in the input list.
-%%
-%%      Examples
-%%
-%%      1) For a missing "Accept" header:
-%%         accepted_content_types(["text/html", "application/json"]) ->
-%%             ["text/html", "application/json"]
-%%
-%%      2) For an "Accept" header with value "text/html, application/*":
-%%         accepted_content_types(["application/json", "text/html"]) ->
-%%             ["application/json", "text/html"]
-%%
-%%      3) For an "Accept" header with value "text/html, */*; q=0.0":
-%%         accepted_content_types(["text/html", "application/json"]) ->
-%%             ["text/html"]
-%%
-%%      4) For an "Accept" header with value "text/html; q=0.5, */*; q=0.1":
-%%         accepts_content_types(["application/json", "text/html"]) ->
-%%             ["text/html", "application/json"]
-%%
-accepted_content_types(Types1, {?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    Types = lists:map(
-        fun(T) -> re:replace(T, "\\s", "", [global, {return, list}]) end,
-        Types1),
-    AcceptHeader = accept_header(THIS),
-    case mochiweb_util:parse_qvalues(AcceptHeader) of
-        invalid_qvalue_string ->
-            bad_accept_header;
-        QList ->
-            TypesQ = lists:foldr(
-                fun(T, Acc) ->
-                    case proplists:get_value(T, QList) of
-                        undefined ->
-                            [MainType, _SubType] = string:tokens(T, "/"),
-                            case proplists:get_value(MainType ++ "/*", QList) of
-                                undefined ->
-                                    case proplists:get_value("*/*", QList) of
-                                        Q when is_float(Q), Q > 0.0 ->
-                                            [{Q, T} | Acc];
-                                        _ ->
-                                            Acc
-                                    end;
-                                Q when Q > 0.0 ->
-                                    [{Q, T} | Acc];
-                                _ ->
-                                    Acc
-                            end;
-                        Q when Q > 0.0 ->
-                            [{Q, T} | Acc];
-                        _ ->
-                            Acc
-                    end
-                end,
-                [], Types),
-            % Note: Stable sort. If 2 types have the same Q value we leave them in the
-            % same order as in the input list.
-            SortFun = fun({Q1, _}, {Q2, _}) -> Q1 >= Q2 end,
-            [Type || {_Q, Type} <- lists:sort(SortFun, TypesQ)]
-    end.
-
-accept_header({?MODULE, [_Socket, _Method, _RawPath, _Version, _Headers]}=THIS) ->
-    case get_header_value("Accept", THIS) of
-        undefined ->
-            "*/*";
-        Value ->
-            Value
-    end.
-
-%%
-%% Tests
-%%
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
--endif.
diff --git a/rabbitmq-server/plugins-src/rabbitmq-amqp1.0/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-amqp1.0/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index a4dca1c71d9a98026cadbe62361cb6ab8b7304ae..22bac87b67d091ff7dc00c68bd9602e878d7a5ee 100644 (file)
@@ -4,9 +4,13 @@ This plugin adds AMQP 1.0 support to RabbitMQ.
 
 # Status
 
-This is a prototype.  You can send and receive messages between 0-9-1
-or 0-8 clients and 1.0 clients with broadly the same semantics as you
-would get with 0-9-1.
+This is mostly a prototype, but it is supported. We describe it as a
+prototype since the amount of real world use and thus battle-testing
+it has received is not so large as that of the STOMP or MQTT
+plugins. However, bugs do get fixed as they are reported.
+
+You can send and receive messages between 0-9-1 or 0-8 clients and 1.0
+clients with broadly the same semantics as you would get with 0-9-1.
 
 # Building and configuring
 
@@ -157,6 +161,7 @@ For targets, addresses are:
     | "/topic/"     RK        Publish to amq.topic with routing key RK
     | "/amq/queue/" Q         Publish to default exchange with routing key Q
     | "/queue/"     Q         Publish to default exchange with routing key Q
+    | Q (no leading slash)    Publish to default exchange with routing key Q
     | "/queue"                Publish to default exchange with message subj as routing key
 
 For sources, addresses are:
@@ -165,6 +170,11 @@ For sources, addresses are:
     | "/topic/"     RK        Consume from temp queue bound to amq.topic with routing key RK
     | "/amq/queue/" Q         Consume from Q
     | "/queue/"     Q         Consume from Q
+    | Q (no leading slash)    Consume from Q
+
+The intent is that the source and destination address formats should be
+mostly the same as those supported by the STOMP plugin, to the extent
+permitted by AMQP 1.0 semantics.
 
 ## Virtual Hosts
 
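The no-leading-slash form added above is easiest to see next to the existing forms. A purely illustrative Python sketch (not the plugin's code) of how the target addresses listed in this hunk map onto an exchange and routing key, with "" standing for the default exchange:

    def parse_target_address(address):
        """Map an AMQP 1.0 target address to (exchange, routing_key)."""
        if address.startswith("/topic/"):
            return "amq.topic", address[len("/topic/"):]
        if address.startswith("/amq/queue/"):
            return "", address[len("/amq/queue/"):]
        if address.startswith("/queue/"):
            return "", address[len("/queue/"):]
        if address == "/queue":
            return "", None  # routing key is taken from the message subject
        if address and not address.startswith("/"):
            return "", address  # bare queue name: the new no-leading-slash form
        raise ValueError("unsupported target address: %r" % address)

    assert parse_target_address("orders") == ("", "orders")
    assert parse_target_address("/topic/a.b.c") == ("amq.topic", "a.b.c")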
index 8dd63b950bb4327e83871eb66cc5ca91aa5fd316..145cfe5bbcbfd15b780db298e323ba6f3d146c7a 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 import sys
 import os
 import re
@@ -87,7 +87,7 @@ def print_hrl(types, defines):
 def print_define(opt, source):
     (name, value) = opt
     if source == 'symbol':
-        quoted = '"%s"' % value
+        quoted = '<<"%s">>' % value
     else:
         quoted = value
     print """-define(V_1_0_%s, {%s, %s}).""" % (name, source, quoted)
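The codegen.py change above makes symbolic constants come out as Erlang binaries instead of strings. A rough Python 3 paraphrase of the function (the constant name and value below are made up) shows the difference in the generated -define lines:

    def print_define(opt, source, as_binary=True):
        name, value = opt
        if source == 'symbol':
            quoted = '<<"%s">>' % value if as_binary else '"%s"' % value
        else:
            quoted = value
        print('-define(V_1_0_%s, {%s, %s}).' % (name, source, quoted))

    print_define(("EXAMPLE", "example-value"), "symbol", as_binary=False)
    # old generator: -define(V_1_0_EXAMPLE, {symbol, "example-value"}).
    print_define(("EXAMPLE", "example-value"), "symbol")
    # new generator: -define(V_1_0_EXAMPLE, {symbol, <<"example-value">>}).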
index 21d780985b614c83167b5b94f8d4c18e7f970c4d..2277cb37d5ff019241790b4cc6dc9a2b26c2d518 100644 (file)
@@ -124,4 +124,4 @@ constructor(symbol) ->
      <<16#a3>>.
 
 generate(symbol, Value) ->
-    [<<(length(Value)):8>>, list_to_binary(Value)].
+    [<<(size(Value)):8>>, Value].
index c5bde6b61d3fe2a786681a3c037573c7666c32ab..a4b1b609dd3abebc55d7c1b150709bdbcf6e1da1 100644 (file)
@@ -64,7 +64,7 @@ keys(Record) ->
     [{symbol, symbolify(K)} || K <- rabbit_amqp1_0_framing0:fields(Record)].
 
 symbolify(FieldName) when is_atom(FieldName) ->
-    re:replace(atom_to_list(FieldName), "_", "-", [{return,list}, global]).
+    re:replace(atom_to_list(FieldName), "_", "-", [{return,binary}, global]).
 
 %% TODO: in fields of composite types with multiple=true, "a null
 %% value and a zero-length array (with a correct type for its
@@ -105,23 +105,19 @@ decode(Other) ->
 decode_map(Fields) ->
     [{decode(K), decode(V)} || {K, V} <- Fields].
 
-encode_described(list, ListOrNumber, Frame) ->
-    Desc = descriptor(ListOrNumber),
-    {described, Desc,
+encode_described(list, CodeNumber, Frame) ->
+    {described, {ulong, CodeNumber},
      {list, lists:map(fun encode/1, tl(tuple_to_list(Frame)))}};
-encode_described(map, ListOrNumber, Frame) ->
-    Desc = descriptor(ListOrNumber),
-    {described, Desc,
+encode_described(map, CodeNumber, Frame) ->
+    {described, {ulong, CodeNumber},
      {map, lists:zip(keys(Frame),
                      lists:map(fun encode/1, tl(tuple_to_list(Frame))))}};
-encode_described(binary, ListOrNumber, #'v1_0.data'{content = Content}) ->
-    Desc = descriptor(ListOrNumber),
-    {described, Desc, {binary, Content}};
-encode_described('*', ListOrNumber, #'v1_0.amqp_value'{content = Content}) ->
-    Desc = descriptor(ListOrNumber),
-    {described, Desc, Content};
-encode_described(annotations, ListOrNumber, Frame) ->
-    encode_described(map, ListOrNumber, Frame).
+encode_described(binary, CodeNumber, #'v1_0.data'{content = Content}) ->
+    {described, {ulong, CodeNumber}, {binary, Content}};
+encode_described('*', CodeNumber, #'v1_0.amqp_value'{content = Content}) ->
+    {described, {ulong, CodeNumber}, Content};
+encode_described(annotations, CodeNumber, Frame) ->
+    encode_described(map, CodeNumber, Frame).
 
 encode(X) ->
     rabbit_amqp1_0_framing0:encode(X).
@@ -140,12 +136,6 @@ symbol_for(X) ->
 number_for(X) ->
     rabbit_amqp1_0_framing0:number_for(X).
 
-descriptor(Symbol) when is_list(Symbol) ->
-    {symbol, Symbol};
-descriptor(Number) when is_number(Number) ->
-    {ulong, Number}.
-
-
 pprint(Thing) when is_tuple(Thing) ->
     case rabbit_amqp1_0_framing0:fields(Thing) of
         unknown -> Thing;
index 245581ee49a450e40b37c78f65505064c2eb9e4c..ab5d33118affce4b32bec89df711a7f62a163821 100644 (file)
@@ -47,10 +47,10 @@ attach(#'v1_0.attach'{name = Name,
     case ensure_target(Target,
                        #incoming_link{
                          name        = Name,
-                         route_state = rabbit_routing_util:init_state() },
+                         route_state = rabbit_routing_util:init_state(),
+                         delivery_count = InitTransfer },
                        DCh) of
-        {ok, ServerTarget,
-         IncomingLink = #incoming_link{ delivery_count = InitTransfer }} ->
+        {ok, ServerTarget, IncomingLink} ->
             {_, _Outcomes} = rabbit_amqp1_0_link_util:outcomes(Source),
             %% Default is mixed
             Confirm =
@@ -81,7 +81,6 @@ attach(#'v1_0.attach'{name = Name,
                 IncomingLink#incoming_link{recv_settle_mode = RcvSettleMode},
             {ok, [Attach, Flow], IncomingLink1, Confirm};
         {error, Reason} ->
-            rabbit_log:warning("AMQP 1.0 attach rejected ~p~n", [Reason]),
             %% TODO proper link establishment protocol here?
             protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD,
                                "Attach rejected: ~p", [Reason])
@@ -194,7 +193,8 @@ ensure_target(Target = #'v1_0.target'{address       = Address,
                                       timeout       = _Timeout},
               Link = #incoming_link{ route_state = RouteState }, DCh) ->
     DeclareParams = [{durable, rabbit_amqp1_0_link_util:durable(Durable)},
-                     {check_exchange, true}],
+                     {check_exchange, true},
+                     {nowait, false}],
     case Dynamic of
         true ->
             protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
@@ -226,7 +226,7 @@ ensure_target(Target = #'v1_0.target'{address       = Address,
                     E
             end;
         _Else ->
-            {error, {unknown_address, Address}}
+            {error, {address_not_utf8_string, Address}}
     end.
 
 incoming_flow(#incoming_link{ delivery_count = Count }, Handle) ->
index 249908558bf8286e268670cfbf2c7bcc55a7f648..c418ba4bc1527bd851fdc7cf1fc009e52fe96d82 100644 (file)
@@ -91,9 +91,10 @@ attach(#'v1_0.attach'{name = Name,
                     protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
                                    "Consume failed: ~p", [Fail])
             end;
-        {error, _Reason} ->
-            %% TODO Deal with this properly -- detach and what have you
-            {ok, [#'v1_0.attach'{source = undefined}]}
+        {error, Reason} ->
+            %% TODO proper link establishment protocol here?
+            protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD,
+                               "Attach rejected: ~p", [Reason])
     end.
 
 credit_drained(#'basic.credit_drained'{credit_drained = CreditDrained},
@@ -156,7 +157,8 @@ ensure_source(Source = #'v1_0.source'{address       = Address,
                                       timeout       = _Timeout},
               Link = #outgoing_link{ route_state = RouteState }, DCh) ->
     DeclareParams = [{durable, rabbit_amqp1_0_link_util:durable(Durable)},
-                     {check_exchange, true}],
+                     {check_exchange, true},
+                     {nowait, false}],
     case Dynamic of
         true -> protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
                                "Dynamic sources not supported", []);
@@ -176,10 +178,12 @@ ensure_source(Source = #'v1_0.source'{address       = Address,
                     ER = rabbit_routing_util:parse_routing(Dest),
                     ok = rabbit_routing_util:ensure_binding(Queue, ER, DCh),
                     {ok, Source, Link#outgoing_link{route_state = RouteState1,
-                                                    queue       = Queue}}
+                                                    queue       = Queue}};
+                {error, _} = E ->
+                    E
             end;
         _ ->
-            {error, {unknown_address, Address}}
+            {error, {address_not_utf8_string, Address}}
     end.
 
 delivery(Deliver = #'basic.deliver'{delivery_tag = DeliveryTag,
index add5b1a404e13e14235efb6211f8a31fe1f5adb2..fbff350d25814a52f9ad58acb6dfcf03b98d8cb9 100644 (file)
@@ -518,7 +518,7 @@ handle_input({frame_header_1_0, Mode},
     end,
     case Size of
         8 -> % length inclusive
-            {State, {frame_header_1_0, Mode}, 8}; %% heartbeat
+            State; %% heartbeat
         _ ->
             switch_callback(State, {frame_payload_1_0, Mode, DOff, Channel}, Size - 8)
     end;
@@ -545,8 +545,9 @@ start_1_0_connection(sasl, State = #v1{sock = Sock}) ->
     Ms = {array, symbol,
           case application:get_env(rabbitmq_amqp1_0, default_user)  of
               {ok, none} -> [];
-              {ok, _}    -> ["ANONYMOUS"]
-          end ++ [ atom_to_list(M) || M <- auth_mechanisms(Sock)]},
+              {ok, _}    -> [<<"ANONYMOUS">>]
+          end ++
+              [list_to_binary(atom_to_list(M)) || M <- auth_mechanisms(Sock)]},
     Mechanisms = #'v1_0.sasl_mechanisms'{sasl_server_mechanisms = Ms},
     ok = send_on_channel0(Sock, Mechanisms, rabbit_amqp1_0_sasl),
     start_1_0_connection0(sasl, State);
index 0955dd9a8ed87e4d4825b1ace93c4f77f951edb8..2f9be46971a0ab12a9718f54d06316298b1251fd 100644 (file)
@@ -147,7 +147,7 @@ handle_cast({frame, Frame, FlowPid},
     catch exit:Reason = #'v1_0.error'{} ->
             %% TODO shut down nicely like rabbit_channel
             End = #'v1_0.end'{ error = Reason },
-            rabbit_log:warning("Closing session for connection ~p: ~p~n",
+            rabbit_log:warning("Closing session for connection ~p:~n~p~n",
                                [ReaderPid, Reason]),
             ok = rabbit_amqp1_0_writer:send_command_sync(Sock, End),
             {stop, normal, State};
@@ -242,7 +242,12 @@ handle_control(#'v1_0.disposition'{state = Outcome,
                                                    requeue      = false};
                                #'v1_0.released'{} ->
                                    #'basic.reject'{delivery_tag = DeliveryTag,
-                                                   requeue      = true}
+                                                   requeue      = true};
+                               _ ->
+                                   protocol_error(
+                                     ?V_1_0_AMQP_ERROR_INVALID_FIELD,
+                                     "Unrecognised state: ~p~n"
+                                     "Disposition was: ~p~n", [Outcome, Disp])
                            end)
         end,
     case rabbit_amqp1_0_session:settle(Disp, session(State), AckFun) of
index 4775fcfe8e7bd807cb8777759586b37ed4099116..3a1c6396b688ac4afe2c627c732dad6efcc7cf89 100644 (file)
@@ -1,5 +1,5 @@
-CLIENT_DIR=swiftmq_9_2_5_client
-CLIENT_PKG=$(CLIENT_DIR).zip
+CLIENT_DIR=swiftmq_9_7_1_client
+CLIENT_PKG=$(CLIENT_DIR).tar.gz
 
 .PHONY: test
 
@@ -8,11 +8,13 @@ test: build/lib
 
 build/lib: $(CLIENT_PKG)
        mkdir -p build/tmp
-       unzip -d build/tmp $(CLIENT_PKG)
+       tar -zx -f $(CLIENT_PKG) -C build/tmp
        mkdir -p build/lib
        mv build/tmp/$(CLIENT_DIR)/jars/*.jar build/lib
        rm -rf build/tmp
        cp ../lib-java/*.jar build/lib
+       (cd ../../../rabbitmq-java-client && ant dist)
+       cp ../../../rabbitmq-java-client/build/dist/rabbitmq-client.jar build/lib
 
 $(CLIENT_PKG):
        @echo
index b58e2a5be8c11ad6ea8b9ff972f0bc0834395a3a..2db131f844fbe9016ab354ad1f0bd0f487772767 100644 (file)
@@ -1,7 +1,10 @@
 package com.rabbitmq.amqp1_0.tests.swiftmq;
 
+import com.rabbitmq.client.*;
 import com.swiftmq.amqp.AMQPContext;
 import com.swiftmq.amqp.v100.client.*;
+import com.swiftmq.amqp.v100.client.Connection;
+import com.swiftmq.amqp.v100.client.Consumer;
 import com.swiftmq.amqp.v100.generated.messaging.message_format.*;
 import com.swiftmq.amqp.v100.generated.messaging.message_format.Properties;
 import com.swiftmq.amqp.v100.messaging.AMQPMessage;
@@ -213,16 +216,6 @@ public class SwiftMQTests extends TestCase {
         route(QUEUE,                      "test",                  "",         true);
         route("test",                     "test",                  "",         true);
 
-        try {
-            route(QUEUE,                  "/exchange/missing",    "",        false);
-            fail("Missing exchange should fail");
-        } catch (Exception e) { }
-
-        try {
-            route("/exchange/missing/",    QUEUE,                  "",        false);
-            fail("Missing exchange should fail");
-        } catch (Exception e) { }
-
         route("/topic/#.c.*",              "/topic/a.b.c.d",        "",        true);
         route("/topic/#.c.*",              "/exchange/amq.topic",   "a.b.c.d", true);
         route("/exchange/amq.topic/#.y.*", "/topic/w.x.y.z",        "",        true);
@@ -242,6 +235,19 @@ public class SwiftMQTests extends TestCase {
         emptyQueue(QUEUE);
     }
 
+    public void testRoutingInvalidRoutes() throws Exception {
+        ConnectionFactory factory = new ConnectionFactory();
+        com.rabbitmq.client.Connection connection = factory.newConnection();
+        Channel channel = connection.createChannel();
+        channel.queueDeclare("transient", false, false, false, null);
+        connection.close();
+
+        for (String dest : Arrays.asList("/exchange/missing", "/queue/transient", "/fruit/orange")) {
+            routeInvalidSource(dest);
+            routeInvalidTarget(dest);
+        }
+    }
+
     private void emptyQueue(String q) throws Exception {
         AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
         Connection conn = new Connection(ctx, host, port, false);
@@ -291,6 +297,42 @@ public class SwiftMQTests extends TestCase {
         conn.close();
     }
 
+    private void routeInvalidSource(String consumerSource) throws Exception {
+        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+        Connection conn = new Connection(ctx, host, port, false);
+        conn.connect();
+        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+        try {
+            Consumer c = s.createConsumer(consumerSource, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
+            c.close();
+            fail("Source '" + consumerSource + "' should fail");
+        }
+        catch (Exception e) {
+            // no-op
+        }
+        finally {
+            conn.close();
+        }
+    }
+
+    private void routeInvalidTarget(String producerTarget) throws Exception {
+        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
+        Connection conn = new Connection(ctx, host, port, false);
+        conn.connect();
+        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
+        try {
+            Producer p = s.createProducer(producerTarget, QoS.AT_LEAST_ONCE);
+            p.close();
+            fail("Target '" + producerTarget + "' should fail");
+        }
+        catch (Exception e) {
+            // no-op
+        }
+        finally {
+            conn.close();
+        }
+    }
+
     // TODO: generalise to a comparison of all immutable parts of messages
     private boolean compareMessageData(AMQPMessage m1, AMQPMessage m2) throws IOException {
         ByteArrayOutputStream b1 = new ByteArrayOutputStream();
diff --git a/rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 0f519d81f5f11781a254612d8028dce2d629e8a1..bca4dcb2a88305490c3b29b43c77efb940c8df3f 100755 (executable)
@@ -6,7 +6,7 @@
 
 sudo apt-get --yes purge slapd
 sudo rm -rf /var/lib/ldap
-sudo apt-get --yes install slapd
+sudo apt-get --yes install slapd ldap-utils
 sleep 1
 
 DIR=$(dirname $0)
index 25abf44e24f150438a7784c2182339c0e798c7c1..02c22ee4b4142ae677b7fa46c1de413c5ede78d6 100644 (file)
@@ -2,6 +2,8 @@ RELEASABLE:=true
 DEPS:=rabbitmq-server rabbitmq-erlang-client eldap-wrapper
 
 ifeq ($(shell nc -z localhost 389 && echo true),true)
-WITH_BROKER_TEST_COMMANDS:=eunit:test(rabbit_auth_backend_ldap_test,[verbose])
+WITH_BROKER_TEST_COMMANDS:=eunit:test([rabbit_auth_backend_ldap_unit_test,rabbit_auth_backend_ldap_test],[verbose])
 WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/etc/rabbit-test
+else
+$(warning Not running LDAP tests; no LDAP server found on localhost)
 endif
index 8fe976abc0afdcf7a85ce0f92747081fe383e27a..943ac551cac7232622610d31f9f5a9f6496415c7 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_backend_ldap).
 -include_lib("eldap/include/eldap.hrl").
 -include_lib("rabbit_common/include/rabbit.hrl").
 
--behaviour(rabbit_auth_backend).
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
 
--export([description/0]).
--export([check_user_login/2, check_vhost_access/2, check_resource_access/3]).
+-export([user_login_authentication/2, user_login_authorization/1,
+         check_vhost_access/3, check_resource_access/3]).
 
 -define(L(F, A),  log("LDAP "         ++ F, A)).
 -define(L1(F, A), log("    LDAP "     ++ F, A)).
 
 %%--------------------------------------------------------------------
 
-description() ->
-    [{name, <<"LDAP">>},
-     {description, <<"LDAP authentication / authorisation">>}].
-
-%%--------------------------------------------------------------------
-
-check_user_login(Username, []) ->
+user_login_authentication(Username, []) ->
     %% Without password, e.g. EXTERNAL
     ?L("CHECK: passwordless login for ~s", [Username]),
     R = with_ldap(creds(none),
@@ -51,14 +46,14 @@ check_user_login(Username, []) ->
        [Username, log_result(R)]),
     R;
 
-check_user_login(Username, [{password, <<>>}]) ->
+user_login_authentication(Username, [{password, <<>>}]) ->
     %% Password "" is special in LDAP, see
     %% https://tools.ietf.org/html/rfc4513#section-5.1.2
     ?L("CHECK: unauthenticated login for ~s", [Username]),
     ?L("DECISION: unauthenticated login for ~s: denied", [Username]),
     {refused, "user '~s' - unauthenticated bind not allowed", [Username]};
 
-check_user_login(User, [{password, PW}]) ->
+user_login_authentication(User, [{password, PW}]) ->
     ?L("CHECK: login for ~s", [User]),
     R = case dn_lookup_when() of
             prebind -> UserDN = username_to_dn_prebind(User),
@@ -70,11 +65,18 @@ check_user_login(User, [{password, PW}]) ->
     ?L("DECISION: login for ~s: ~p", [User, log_result(R)]),
     R;
 
-check_user_login(Username, AuthProps) ->
+user_login_authentication(Username, AuthProps) ->
     exit({unknown_auth_props, Username, AuthProps}).
 
-check_vhost_access(User = #user{username = Username,
-                                impl     = #impl{user_dn = UserDN}}, VHost) ->
+user_login_authorization(Username) ->
+    case user_login_authentication(Username, []) of
+        {ok, #auth_user{impl = Impl}} -> {ok, Impl};
+        Else                          -> Else
+    end.
+
+check_vhost_access(User = #auth_user{username = Username,
+                                     impl     = #impl{user_dn = UserDN}},
+                   VHost, _Sock) ->
     Args = [{username, Username},
             {user_dn,  UserDN},
             {vhost,    VHost}],
@@ -84,8 +86,8 @@ check_vhost_access(User = #user{username = Username,
        [log_vhost(Args), log_user(User), log_result(R)]),
     R.
 
-check_resource_access(User = #user{username = Username,
-                                   impl     = #impl{user_dn = UserDN}},
+check_resource_access(User = #auth_user{username = Username,
+                                        impl     = #impl{user_dn = UserDN}},
                       #resource{virtual_host = VHost, kind = Type, name = Name},
                       Permission) ->
     Args = [{username,   Username},
@@ -133,7 +135,7 @@ evaluate0({in_group, DNPattern}, Args, User, LDAP) ->
     evaluate({in_group, DNPattern, "member"}, Args, User, LDAP);
 
 evaluate0({in_group, DNPattern, Desc}, Args,
-          #user{impl = #impl{user_dn = UserDN}}, LDAP) ->
+          #auth_user{impl = #impl{user_dn = UserDN}}, LDAP) ->
     Filter = eldap:equalityMatch(Desc, UserDN),
     DN = fill(DNPattern, Args),
     R = object_exists(DN, Filter, LDAP),
@@ -247,33 +249,24 @@ with_ldap({error, _} = E, _Fun, _State) ->
 %% TODO - ATM we create and destroy a new LDAP connection on every
 %% call. This could almost certainly be more efficient.
 with_ldap({ok, Creds}, Fun, Servers) ->
-    Opts0 = [{ssl, env(use_ssl)}, {port, env(port)}],
-    SSLOpts = env(ssl_options),
-    %% We can't just pass through [] as sslopts in the old case, eldap
-    %% exit()s when you do that.
-    Opts1 = case {SSLOpts, rabbit_misc:version_compare(
-                             erlang:system_info(version), "5.10")} of %% R16A
-                {[], _}  -> Opts0;
-                {_,  lt} -> exit({ssl_options_requires_min_r16a});
-                {_,  _}  -> [{sslopts, SSLOpts} | Opts0]
-            end,
-    Opts2 = case env(log) of
+    Opts0 = [{port, env(port)}],
+    Opts1 = case env(log) of
                 network ->
                     Pre = "    LDAP network traffic: ",
                     rabbit_log:info(
                       "    LDAP connecting to servers: ~p~n", [Servers]),
                     [{log, fun(1, S, A) -> rabbit_log:warning(Pre ++ S, A);
                               (2, S, A) -> rabbit_log:info   (Pre ++ S, A)
-                           end} | Opts1];
+                           end} | Opts0];
                 _ ->
-                    Opts1
+                    Opts0
             end,
     %% eldap defaults to 'infinity' but doesn't allow you to set that. Harrumph.
     Opts = case env(timeout) of
-               infinity -> Opts2;
-               MS       -> [{timeout, MS} | Opts2]
+               infinity -> Opts1;
+               MS       -> [{timeout, MS} | Opts1]
            end,
-    case eldap:open(Servers, Opts) of
+    case eldap_open(Servers, Opts) of
         {ok, LDAP} ->
             try Creds of
                 anon ->
@@ -300,6 +293,43 @@ with_ldap({ok, Creds}, Fun, Servers) ->
             Error
     end.
 
+eldap_open(Servers, Opts) ->
+    case eldap:open(Servers, ssl_conf() ++ Opts) of
+        {ok, LDAP} ->
+            TLS = env(use_starttls),
+            case {TLS, at_least("5.10.4")} of %%R16B03
+                {false, _}     -> {ok, LDAP};
+                {true,  false} -> exit({starttls_requires_min_r16b3});
+                {true,  _}     -> TLSOpts = ssl_options(),
+                                  ELDAP = eldap, %% Fool xref
+                                  case ELDAP:start_tls(LDAP, TLSOpts) of
+                                      ok    -> {ok, LDAP};
+                                      Error -> Error
+                                  end
+            end;
+        Error ->
+            Error
+    end.
+
+ssl_conf() ->
+    %% We must make sure not to add SSL options unless a) we have at least R16A
+    %% b) we have SSL turned on (or it breaks StartTLS...)
+    case env(use_ssl) of
+        false -> [{ssl, false}];
+        true  -> %% Only the unfixed version can be []
+                 case {env(ssl_options), at_least("5.10")} of %% R16A
+                     {_,  true}  -> [{ssl, true}, {sslopts, ssl_options()}];
+                     {[], _}     -> [{ssl, true}];
+                     {_,  false} -> exit({ssl_options_requires_min_r16a})
+                 end
+    end.
+
+ssl_options() ->
+    rabbit_networking:fix_ssl_options(env(ssl_options)).
+
+at_least(Ver) ->
+    rabbit_misc:version_compare(erlang:system_info(version), Ver) =/= lt.
+
 env(F) ->
     {ok, V} = application:get_env(rabbitmq_auth_backend_ldap, F),
     V.
@@ -309,23 +339,33 @@ do_login(Username, PrebindUserDN, Password, LDAP) ->
                  unknown -> username_to_dn(Username, LDAP, dn_lookup_when());
                  _       -> PrebindUserDN
              end,
-    User = #user{username     = Username,
-                 auth_backend = ?MODULE,
-                 impl         = #impl{user_dn  = UserDN,
-                                      password = Password}},
-    TagRes = [begin
-                  ?L1("CHECK: does ~s have tag ~s?", [Username, Tag]),
-                  R = evaluate(Q, [{username, Username},
-                                   {user_dn,  UserDN}], User, LDAP),
-                  ?L1("DECISION: does ~s have tag ~s? ~p",
-                      [Username, Tag, R]),
-                  {Tag, R}
-              end || {Tag, Q} <- env(tag_queries)],
-    case [E || {_, E = {error, _}} <- TagRes] of
-        []      -> {ok, User#user{tags = [Tag || {Tag, true} <- TagRes]}};
-        [E | _] -> E
+    User = #auth_user{username     = Username,
+                      impl         = #impl{user_dn  = UserDN,
+                                           password = Password}},
+    DTQ = fun (LDAPn) -> do_tag_queries(Username, UserDN, User, LDAPn) end,
+    TagRes = case env(other_bind) of
+                 as_user -> DTQ(LDAP);
+                 _       -> with_ldap(creds(User), DTQ)
+             end,
+    case TagRes of
+        {ok, L} -> case [E || {_, E = {error, _}} <- L] of
+                       []      -> Tags = [Tag || {Tag, true} <- L],
+                                  {ok, User#auth_user{tags = Tags}};
+                       [E | _] -> E
+                   end;
+        E       -> E
     end.
 
+do_tag_queries(Username, UserDN, User, LDAP) ->
+    {ok, [begin
+              ?L1("CHECK: does ~s have tag ~s?", [Username, Tag]),
+              R = evaluate(Q, [{username, Username},
+                               {user_dn,  UserDN}], User, LDAP),
+              ?L1("DECISION: does ~s have tag ~s? ~p",
+                  [Username, Tag, R]),
+              {Tag, R}
+          end || {Tag, Q} <- env(tag_queries)]}.
+
 dn_lookup_when() -> case {env(dn_lookup_attribute), env(dn_lookup_bind)} of
                         {none, _}       -> never;
                         {_,    as_user} -> postbind;
@@ -364,8 +404,8 @@ creds(User) -> creds(User, env(other_bind)).
 
 creds(none, as_user) ->
     {error, "'other_bind' set to 'as_user' but no password supplied"};
-creds(#user{impl = #impl{user_dn = UserDN, password = Password}}, as_user) ->
-    {ok, {UserDN, Password}};
+creds(#auth_user{impl = #impl{user_dn = UserDN, password = PW}}, as_user) ->
+    {ok, {UserDN, PW}};
 creds(_, Creds) ->
     {ok, Creds}.
 
@@ -380,13 +420,13 @@ fill(Fmt, Args) ->
     ?L2("template result: \"~s\"", [R]),
     R.
 
-log_result({ok, #user{}})   -> ok;
-log_result(true)            -> ok;
-log_result(false)           -> denied;
-log_result({refused, _, _}) -> denied;
-log_result(E)               -> E.
+log_result({ok, #auth_user{}}) -> ok;
+log_result(true)               -> ok;
+log_result(false)              -> denied;
+log_result({refused, _, _})    -> denied;
+log_result(E)                  -> E.
 
-log_user(#user{username = U}) -> rabbit_misc:format("\"~s\"", [U]).
+log_user(#auth_user{username = U}) -> rabbit_misc:format("\"~s\"", [U]).
 
 log_vhost(Args) ->
     rabbit_misc:format("access to vhost \"~s\"", [pget(vhost, Args)]).
index de13c3bfe2e4b64394e78044d91f225ae29fccc7..e0cd7aa13dbc59ae64534172e8150de94095cbe5 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_backend_ldap_app).
@@ -32,7 +32,8 @@ start(_Type, _StartArgs) ->
                    "in the list of auth_backends. LDAP auth will not work.~n")
     end,
     {ok, SSL} = application:get_env(rabbitmq_auth_backend_ldap, use_ssl),
-    case SSL of
+    {ok, TLS} = application:get_env(rabbitmq_auth_backend_ldap, use_starttls),
+    case SSL orelse TLS of
         true  -> rabbit_networking:ensure_ssl();
         false -> ok
     end,
index c7cf113558fd1b24709699842f48bd51a5cbccf8..47c3d660ca9391f74e83039ed644d391c7fc39b6 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_backend_ldap_util).
@@ -25,7 +25,10 @@ fill(Fmt, [{K, V} | T]) ->
     Var = [[$\\, $$, ${] ++ atom_to_list(K) ++ [$}]],
     fill(re:replace(Fmt, Var, [to_repl(V)], [global]), T).
 
-to_repl(V) when is_atom(V) ->
-    atom_to_list(V);
-to_repl(V) ->
-    V.
+to_repl(V) when is_atom(V)   -> to_repl(atom_to_list(V));
+to_repl(V) when is_binary(V) -> to_repl(binary_to_list(V));
+to_repl([])                  -> [];
+to_repl([$\\ | T])           -> [$\\, $\\ | to_repl(T)];
+to_repl([$&  | T])           -> [$\\, $&  | to_repl(T)];
+to_repl([H   | T])           -> [H        | to_repl(T)].
+
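
Not part of the diff: the escaping added to to_repl/1 above matters because re:replace/4 gives "&" and "\" special meaning in the replacement string, so an unescaped "&" expands to the whole match instead of being substituted literally. A quick editorial illustration ({return, list} is used only to make the shell output readable; the plugin itself keeps the default iodata return):

    1> re:replace("x${u}x", "\\$\\{u\\}", "a&b", [global, {return, list}]).
    "xa${u}bx"
    2> re:replace("x${u}x", "\\$\\{u\\}", "a\\&b", [global, {return, list}]).
    "xa&bx"
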
index e1b76284454ee45ca17d27f2900959d131109aee..b2139d66d4b7663d485d38518c38ebbcf9856f2e 100644 (file)
@@ -15,6 +15,7 @@
           {resource_access_query, {constant, true}},
           {tag_queries,           [{administrator, {constant, false}}]},
           {use_ssl,               false},
+          {use_starttls,          false},
           {ssl_options,           []},
           {port,                  389},
           {timeout,               infinity},
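
Not part of the diff: a hedged rabbitmq.config fragment showing how the new use_starttls default above would typically be switched on. The hostname and certificate path are placeholders; only the option names come from this changeset.

    [{rabbitmq_auth_backend_ldap,
      [{servers,      ["ldap.example.com"]},
       {port,         389},
       {use_ssl,      false},               %% StartTLS upgrades the plain connection
       {use_starttls, true},                %% new option introduced here
       {ssl_options,  [{cacertfile, "/path/to/cacert.pem"}]}]}].
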
index 72fbc84de927a69ef27424cc0fe9b84a8aaa6c28..2b92632e29f3d4c9a25e3437026e6f24ccbeaf4f 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_backend_ldap_test).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/test/src/rabbit_auth_backend_ldap_unit_test.erl b/rabbitmq-server/plugins-src/rabbitmq-auth-backend-ldap/test/src/rabbit_auth_backend_ldap_unit_test.erl
new file mode 100644 (file)
index 0000000..47223f9
--- /dev/null
@@ -0,0 +1,33 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap_unit_test).
+
+-include_lib("eunit/include/eunit.hrl").
+
+fill_test() ->
+    F = fun(Fmt, Args, Res) ->
+                ?assertEqual(Res, rabbit_auth_backend_ldap_util:fill(Fmt, Args))
+        end,
+    F("x${username}x", [{username,  "ab"}],     "xabx"),
+    F("x${username}x", [{username,  ab}],       "xabx"),
+    F("x${username}x", [{username,  <<"ab">>}], "xabx"),
+    F("x${username}x", [{username,  ""}],       "xx"),
+    F("x${username}x", [{fusername, "ab"}],     "x${username}x"),
+    F("x${usernamex",  [{username,  "ab"}],     "x${usernamex"),
+    F("x${username}x", [{username,  "a\\b"}],   "xa\\bx"),
+    F("x${username}x", [{username,  "a&b"}],    "xa&bx"),
+    ok.
diff --git a/rabbitmq-server/plugins-src/rabbitmq-auth-mechanism-ssl/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-auth-mechanism-ssl/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 883a9dc8fc4d98bd58b62b05dc4357149d50f996..47cfcab6e9f7ab87cd65c7a2f731b7e97fc59cc4 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 
@@ -29,7 +29,9 @@
                     {mfa,         {rabbit_registry, register,
                                    [auth_mechanism, <<"EXTERNAL">>, ?MODULE]}},
                     {requires,    rabbit_registry},
-                    {enables,     kernel_ready}]}).
+                    {enables,     kernel_ready},
+                    {cleanup,     {rabbit_registry, unregister,
+                                   [auth_mechanism, <<"EXTERNAL">>]}}]}).
 
 -record(state, {username = undefined}).
 
@@ -55,20 +57,21 @@ init(Sock) ->
     Username = case rabbit_net:peercert(Sock) of
                    {ok, C} ->
                        case rabbit_ssl:peer_cert_auth_name(C) of
-                           unsafe    -> {refused, "configuration unsafe", []};
-                           not_found -> {refused, "no name found", []};
+                           unsafe    -> {refused, none,
+                                         "configuration unsafe", []};
+                           not_found -> {refused, none, "no name found", []};
                            Name      -> Name
                        end;
                    {error, no_peercert} ->
-                       {refused, "no peer certificate", []};
+                       {refused, none, "no peer certificate", []};
                    nossl ->
-                       {refused, "not SSL connection", []}
+                       {refused, none, "not SSL connection", []}
                end,
     #state{username = Username}.
 
 handle_response(_Response, #state{username = Username}) ->
     case Username of
-        {refused, _, _} = E ->
+        {refused, _, _, _} = E ->
             E;
         _ ->
             rabbit_access_control:check_user_login(Username, [])
index 1867d3a47899deb90593401aa97c0cebba0628b6..7f6eff95419ce796f0589749cdfe13a44f890935 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_mechanism_ssl_app).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-consistent-hash-exchange/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-consistent-hash-exchange/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 381d0542ec273cbf7d90baab19184e8dc58e9343..68e3253241027204e2895fd9013f02cb98844c54 100644 (file)
@@ -33,7 +33,9 @@
      {mfa,         {rabbit_registry, register,
                     [exchange, <<"x-consistent-hash">>, ?MODULE]}},
      {requires,    rabbit_registry},
-     {enables,     kernel_ready}]}).
+     {enables,     kernel_ready},
+     {cleanup,     {rabbit_registry, unregister,
+                    [exchange, <<"x-consistent-hash">>]}}]}).
 
 -rabbit_boot_step(
    {rabbit_exchange_type_consistent_hash_mnesia,
diff --git a/rabbitmq-server/plugins-src/rabbitmq-erlang-client/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-erlang-client/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 71c392718a18b282ca5c87eafc760c538958920a..a42c666958c715a93b5303a82e59985261d85803 100644 (file)
@@ -11,7 +11,7 @@
 # The Original Code is RabbitMQ.
 #
 # The Initial Developer of the Original Code is GoPivotal, Inc.
-# Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+# Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 #
 
 VERSION=0.0.0
index 42ead353b436a85d4d3f8caf85dfd46609f12cfa..0b46f9fadab281e00ad6871e4ea34a934c4148ec 100644 (file)
@@ -11,7 +11,7 @@
 # The Original Code is RabbitMQ.
 #
 # The Initial Developer of the Original Code is GoPivotal, Inc.
-# Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+# Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 #
 
 VERSION=%%VSN%%
index 994d474f21142c57fa573d88d14b972cdc0e1dd1..9de9221f8bd4430dfebafa5f8eeafb25785c0b8b 100644 (file)
@@ -11,7 +11,7 @@
 # The Original Code is RabbitMQ.
 #
 # The Initial Developer of the Original Code is GoPivotal, Inc.
-# Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+# Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 #
 
 # The client library can either be built from source control or by downloading
index f4627b33f460973366c9f6692d75f99462fab8ff..c9c96add7789acf2ac4e81290ead1cedd7c35362 100644 (file)
@@ -3,6 +3,7 @@
   {vsn, "%%VSN%%"},
   {modules, []},
   {registered, [amqp_sup]},
-  {env, [{prefer_ipv6, false}]},
+  {env, [{prefer_ipv6, false},
+         {ssl_options, []}]},
   {mod, {amqp_client, []}},
   {applications, [kernel, stdlib, xmerl]}]}.
index cc1f48528243d84b17a6a33675acab4c95fb8f9a..bc74dd7de317e0c261d0539fb59d9d2aa8a58409 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -ifndef(AMQP_CLIENT_HRL).
index bdaf9c288f98834db114cca6eec2d2aee7f772c1..f990f04afd16364d4605f3c1c98eada7f6c3a1cd 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -include("amqp_client.hrl").
index dda4993bcdf542bf8aece9ecddc414beb0bc5aca..fbaa28c5de7699604d12367d368575634d2b12cd 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -include("amqp_client.hrl").
index 79e41ca4461047da3747ca64278614c0f45429b8..34618c5b98ea4c5383dfdacea57789612ca3cb9d 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -define(QUEUE_PREFIX, "/queue").
index dc808175ed4fa1cd346428654556179331620a0a..46b2a4d6e3b3efeb1a2148a3304e6b0298e29c2b 100644 (file)
@@ -19,7 +19,8 @@
              rabbit_command_assembler,
              rabbit_exchange_type,
              rabbit_exchange_decorator,
-             rabbit_auth_backend,
+             rabbit_authn_backend,
+             rabbit_authz_backend,
              rabbit_auth_mechanism,
              rabbit_framing_amqp_0_8,
              rabbit_framing_amqp_0_9_1,
@@ -27,6 +28,7 @@
              rabbit_misc,
              rabbit_msg_store_index,
              rabbit_net,
+             rabbit_networking,
              rabbit_nodes,
              rabbit_policy_validator,
              rabbit_reader,
index 576cdc10264ca802a9e38633038ccb466b34a806..9192cad6f816ec00d21d8ee14d9e46ae80373333 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index abff0e49b88b35f6a91f6ccb7d340b8005c8bcd0..11217954b61fd52d5eb72a88be9371d2dc7b84d0 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
@@ -74,7 +74,8 @@
 -export([call_consumer/2, subscribe/3]).
 -export([next_publish_seqno/1, wait_for_confirms/1, wait_for_confirms/2,
          wait_for_confirms_or_die/1, wait_for_confirms_or_die/2]).
--export([start_link/5, set_writer/2, connection_closing/3, open/1]).
+-export([start_link/5, set_writer/2, connection_closing/3, open/1,
+         enable_delivery_flow_control/1, notify_received/1]).
 
 -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
          handle_info/2]).
                 flow_handler       = none,
                 unconfirmed_set    = gb_sets:new(),
                 waiting_set        = gb_trees:empty(),
-                only_acks_received = true
+                only_acks_received = true,
+
+                %% true | false, only relevant in the direct
+                %% client case.
+                %% when true, consumers will manually notify
+                %% queue pids using rabbit_amqqueue:notify_sent/2
+                %% to prevent the queue from overwhelming slow
+                %% consumers that use automatic acknowledgement
+                %% mode.
+                delivery_flow_control = false
                }).
 
 %%---------------------------------------------------------------------------
@@ -342,6 +352,12 @@ start_link(Driver, Connection, ChannelNumber, Consumer, Identity) ->
 set_writer(Pid, Writer) ->
     gen_server:cast(Pid, {set_writer, Writer}).
 
+enable_delivery_flow_control(Pid) ->
+    gen_server:cast(Pid, enable_delivery_flow_control).
+
+notify_received({Pid, QPid, ServerChPid}) ->
+    gen_server:cast(Pid, {send_notify, {QPid, ServerChPid}}).
+
 %% @private
 connection_closing(Pid, ChannelCloseType, Reason) ->
     gen_server:cast(Pid, {connection_closing, ChannelCloseType, Reason}).
@@ -399,9 +415,19 @@ handle_call({subscribe, BasicConsume, Subscriber}, From, State) ->
                             State).
 
 %% @private
+handle_cast({set_writer, Writer}, State = #state{driver = direct}) ->
+    link(Writer),
+    {noreply, State#state{writer = Writer}};
 handle_cast({set_writer, Writer}, State) ->
     {noreply, State#state{writer = Writer}};
 %% @private
+handle_cast(enable_delivery_flow_control, State) ->
+    {noreply, State#state{delivery_flow_control = true}};
+%% @private
+handle_cast({send_notify, {QPid, ChPid}}, State) ->
+    rabbit_amqqueue:notify_sent(QPid, ChPid),
+    {noreply, State};
+%% @private
 handle_cast({cast, Method, AmqpMsg, Sender, noflow}, State) ->
     handle_method_to_server(Method, AmqpMsg, none, Sender, noflow, State);
 handle_cast({cast, Method, AmqpMsg, Sender, flow}, State) ->
@@ -458,9 +484,15 @@ handle_info({send_command, Method, Content}, State) ->
     handle_method_from_server(Method, Content, State);
 %% Received from rabbit_channel in the direct case
 %% @private
-handle_info({send_command_and_notify, Q, ChPid, Method, Content}, State) ->
-    handle_method_from_server(Method, Content, State),
-    rabbit_amqqueue:notify_sent(Q, ChPid),
+handle_info({send_command_and_notify, QPid, ChPid,
+             Method = #'basic.deliver'{}, Content},
+            State = #state{delivery_flow_control = MFC}) ->
+    case MFC of
+        false -> handle_method_from_server(Method, Content, State),
+                 rabbit_amqqueue:notify_sent(QPid, ChPid);
+        true  -> handle_method_from_server(Method, Content,
+                                           {self(), QPid, ChPid}, State)
+    end,
     {noreply, State};
 %% This comes from the writer or rabbit_channel
 %% @private
@@ -633,7 +665,9 @@ pre_do(_, _, _, State) ->
 %% Handling of methods from the server
 %%---------------------------------------------------------------------------
 
-handle_method_from_server(Method, Content, State = #state{closing = Closing}) ->
+safely_handle_method_from_server(Method, Content,
+                                 Continuation,
+                                 State = #state{closing = Closing}) ->
     case is_connection_method(Method) of
         true -> server_misbehaved(
                     #amqp_error{name        = command_invalid,
@@ -651,11 +685,28 @@ handle_method_from_server(Method, Content, State = #state{closing = Closing}) ->
                                       "server because channel is closing~n",
                                       [self(), {Method, Content}]),
                             {noreply, State};
-                    true -> handle_method_from_server1(Method,
-                                                       amqp_msg(Content), State)
+                    true ->
+                         Continuation()
                  end
     end.
 
+handle_method_from_server(Method, Content, State) ->
+    Fun = fun () ->
+                  handle_method_from_server1(Method,
+                                             amqp_msg(Content), State)
+          end,
+    safely_handle_method_from_server(Method, Content, Fun, State).
+
+handle_method_from_server(Method = #'basic.deliver'{},
+                          Content, DeliveryCtx, State) ->
+    Fun = fun () ->
+                  handle_method_from_server1(Method,
+                                             amqp_msg(Content),
+                                             DeliveryCtx,
+                                             State)
+          end,
+    safely_handle_method_from_server(Method, Content, Fun, State).
+
 handle_method_from_server1(#'channel.open_ok'{}, none, State) ->
     {noreply, rpc_bottom_half(ok, State)};
 handle_method_from_server1(#'channel.close'{reply_code = Code,
@@ -739,6 +790,12 @@ handle_method_from_server1(Method, none, State) ->
 handle_method_from_server1(Method, Content, State) ->
     {noreply, rpc_bottom_half({Method, Content}, State)}.
 
+%% only used with manual consumer-to-queue flow control
+handle_method_from_server1(#'basic.deliver'{} = Deliver, AmqpMsg,
+                           DeliveryCtx, State) ->
+    ok = call_to_consumer(Deliver, AmqpMsg, DeliveryCtx, State),
+    {noreply, State}.
+
 %%---------------------------------------------------------------------------
 %% Other handle_* functions
 %%---------------------------------------------------------------------------
@@ -920,5 +977,8 @@ handle_wait_for_confirms(From, Timeout,
 call_to_consumer(Method, Args, #state{consumer = Consumer}) ->
     amqp_gen_consumer:call_consumer(Consumer, Method, Args).
 
+call_to_consumer(Method, Args, DeliveryCtx, #state{consumer = Consumer}) ->
+    amqp_gen_consumer:call_consumer(Consumer, Method, Args, DeliveryCtx).
+
 safe_cancel_timer(undefined) -> ok;
 safe_cancel_timer(TRef)      -> erlang:cancel_timer(TRef).
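
Not part of the diff: a minimal sketch of how an application might use the manual consumer-to-queue flow control introduced above. Only enable_delivery_flow_control/1, notify_received/1 and the {Deliver, Msg, DeliveryCtx} shape come from this changeset; the module layout, the queue handling and the assumption that the channel's consumer forwards delivery triples (as amqp_direct_consumer does further down) are editorial.

    -module(manual_flow_sketch).
    -include_lib("amqp_client/include/amqp_client.hrl").
    -export([consume/2]).

    consume(Ch, Queue) ->
        ok = amqp_channel:enable_delivery_flow_control(Ch),
        _ = amqp_channel:subscribe(Ch, #'basic.consume'{queue  = Queue,
                                                        no_ack = true}, self()),
        loop().

    loop() ->
        receive
            #'basic.consume_ok'{} ->
                loop();
            {#'basic.deliver'{}, #amqp_msg{payload = Payload}, DeliveryCtx} ->
                io:format("received ~p bytes~n", [byte_size(Payload)]),
                %% return credit so the queue keeps delivering
                ok = amqp_channel:notify_received(DeliveryCtx),
                loop();
            #'basic.cancel_ok'{} ->
                ok
        end.
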
index fbd2d0c17272f0a3d0908c4a157382000d719a4f..8fc4d8fb7db4cff321a2de24c53ab2aa9230dbb8 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
@@ -53,7 +53,6 @@ start_writer(_Sup, direct, [ConnPid, Node, User, VHost, Collector],
         rpc:call(Node, rabbit_direct, start_channel,
                  [ChNumber, ChPid, ConnPid, ConnName, ?PROTOCOL, User,
                   VHost, ?CLIENT_CAPABILITIES, Collector]),
-    link(RabbitCh),
     RabbitCh;
 start_writer(Sup, network, [Sock, FrameMax], ConnName, ChNumber, ChPid) ->
     {ok, Writer} = supervisor2:start_child(
index 2108b6a35b3764391d41ad60f20c0ba8a830dd17..6d461e1f743122b35123bb0b145b08762e96963b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index e805ea1b58d4f94cb9f107862bc9658ef16da556..b260cd265cbd3c102cae3753f308b3d89e74b362 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index f5fa1aeb426df6097db282d61e8cc0dcd893aa0c..83905d09a82650c5f88e6ca46cbdd37f0597547c 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index d6d044fb10b6e1273f9de4792d4ccdaf399e6fab..371b22567adbac9be282845838cf5069941d2929 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
index 5268dedd835008fd9ab5f605b4d612e3d8d3ef61..7bc8a2d225e445abc8a52b521e848d8b0aa8ca7e 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index 41717c2c840adbf0c3fd6a829b0950b04f19c634..58023757c51709e8abfe3c62c088a3113d402a6a 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index e0ed0b38fed66f010bc7b5f0637dd003f8e295d7..5cd7df73872ea71c71e80ce7310eb12860b221c9 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
                 params,
                 adapter_info,
                 collector,
-                closing_reason %% undefined | Reason
+                closing_reason, %% undefined | Reason
+                connected_at
                }).
 
 -define(INFO_KEYS, [type]).
 
 -define(CREATION_EVENT_KEYS, [pid, protocol, host, port, name,
                               peer_host, peer_port,
-                              user, vhost, client_properties, type]).
+                              user, vhost, client_properties, type,
+                              connected_at]).
 
 %%---------------------------------------------------------------------------
 
@@ -72,6 +74,10 @@ handle_message({force_event_refresh, Ref}, State = #state{node = Node}) ->
     {ok, State};
 handle_message(closing_timeout, State = #state{closing_reason = Reason}) ->
     {stop, {closing_timeout, Reason}, State};
+handle_message({'DOWN', _MRef, process, _ConnSup, shutdown}, State) ->
+    {stop, {shutdown, node_down}, State};
+handle_message({'DOWN', _MRef, process, _ConnSup, Reason}, State) ->
+    {stop, {remote_node_down, Reason}, State};
 handle_message(Msg, State) ->
     {stop, {unexpected_msg, Msg}, State}.
 
@@ -94,6 +100,7 @@ i(user,              #state{params = P}) -> P#amqp_params_direct.username;
 i(vhost,             #state{params = P}) -> P#amqp_params_direct.virtual_host;
 i(client_properties, #state{params = P}) ->
     P#amqp_params_direct.client_properties;
+i(connected_at,      #state{connected_at = T}) -> T;
 %% Optional adapter info
 i(protocol,     #state{adapter_info = I}) -> I#amqp_adapter_info.protocol;
 i(host,         #state{adapter_info = I}) -> I#amqp_adapter_info.host;
@@ -122,7 +129,8 @@ connect(Params = #amqp_params_direct{username     = Username,
     State1 = State#state{node         = Node,
                          vhost        = VHost,
                          params       = Params,
-                         adapter_info = ensure_adapter_info(Info)},
+                         adapter_info = ensure_adapter_info(Info),
+                         connected_at = rabbit_misc:now_to_ms(os:timestamp())},
     case rpc:call(Node, rabbit_direct, connect,
                   [{Username, Password}, VHost, ?PROTOCOL, self(),
                    connection_info(State1)]) of
@@ -130,6 +138,13 @@ connect(Params = #amqp_params_direct{username     = Username,
             {ok, ChMgr, Collector} = SIF(i(name, State1)),
             State2 = State1#state{user      = User,
                                   collector = Collector},
+            %% There's no real connection-level process on the remote
+            %% node for us to monitor or link to, but we want to
+            %% detect connection death if the remote node goes down
+            %% when there are no channels. So we monitor the
+            %% supervisor; that way we find out if the node goes down
+            %% or the rabbit app stops.
+            erlang:monitor(process, {rabbit_direct_client_sup, Node}),
             {ok, {ServerProperties, 0, ChMgr, State2}};
         {error, _} = E ->
             E;
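
Not part of the diff: the monitor added above uses the standard {RegisteredName, Node} form of erlang:monitor/2, which reports both the supervisor stopping and the whole node disappearing. A standalone illustration of that mechanism (the names passed in are placeholders):

    -module(remote_monitor_sketch).
    -export([watch/2]).

    %% watch(rabbit_direct_client_sup, 'rabbit@remote') returns once the
    %% remote process or its node is gone; Reason is 'noconnection' when
    %% the node itself went down.
    watch(Name, Node) ->
        MRef = erlang:monitor(process, {Name, Node}),
        receive
            {'DOWN', MRef, process, _Object, Reason} ->
                {remote_down, Reason}
        end.
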
index 517a2b332e1a9eaca6246b12825610888a400e5a..34b14239f1297b3b95ea6e70e03e0572138943e7 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @doc This module is an implementation of the amqp_gen_consumer
@@ -45,7 +45,8 @@
 -behaviour(amqp_gen_consumer).
 
 -export([init/1, handle_consume_ok/3, handle_consume/3, handle_cancel_ok/3,
-         handle_cancel/2, handle_server_cancel/2, handle_deliver/3,
+         handle_cancel/2, handle_server_cancel/2,
+         handle_deliver/3, handle_deliver/4,
          handle_info/2, handle_call/3, terminate/2]).
 
 %%---------------------------------------------------------------------------
@@ -86,6 +87,10 @@ handle_server_cancel(M, C) ->
 handle_deliver(M, A, C) ->
     C ! {M, A},
     {ok, C}.
+handle_deliver(M, A, DeliveryCtx, C) ->
+    C ! {M, A, DeliveryCtx},
+    {ok, C}.
+
 
 %% @private
 handle_info({'DOWN', _MRef, process, C, Info}, C) ->
index 3cc64b791373de92da56630c5dc9dc5cd00f5fc2..55618ac8be6f254b3d5ee1b9800aa75cfb5f77a4 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index d94194cd9fa96f27f352dd9a37905aea7d1009f6..68637ccf94231850b770e9f06f733fa68d716a4d 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @doc A behaviour module for implementing consumers for
@@ -31,7 +31,7 @@
 
 -behaviour(gen_server2).
 
--export([start_link/3, call_consumer/2, call_consumer/3]).
+-export([start_link/3, call_consumer/2, call_consumer/3, call_consumer/4]).
 -export([behaviour_info/1]).
 -export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
          handle_info/2, prioritise_info/3]).
@@ -71,6 +71,9 @@ call_consumer(Pid, Msg) ->
 call_consumer(Pid, Method, Args) ->
     gen_server2:call(Pid, {consumer_call, Method, Args}, infinity).
 
+call_consumer(Pid, Method, Args, DeliveryCtx) ->
+    gen_server2:call(Pid, {consumer_call, Method, Args, DeliveryCtx}, infinity).
+
 %%---------------------------------------------------------------------------
 %% Behaviour
 %%---------------------------------------------------------------------------
@@ -149,6 +152,19 @@ behaviour_info(callbacks) ->
      %% is received from the server.
      {handle_deliver, 3},
 
+     %% handle_deliver(Deliver, Message,
+     %%                DeliveryCtx, State) -> ok_error()
+     %% where
+     %%      Deliver = #'basic.deliver'{}
+     %%      Message = #amqp_msg{}
+     %%      DeliveryCtx = {pid(), pid(), pid()}
+     %%      State = state()
+     %%
+     %% This callback is invoked by the channel every time a basic.deliver
+     %% is received from the server. Only relevant for channels that use
+     %% direct client connection and manual flow control.
+     {handle_deliver, 4},
+
      %% handle_info(Info, State) -> ok_error()
      %% where
      %%      Info = any()
@@ -207,6 +223,15 @@ init([ConsumerModule, ExtraParams, Identity]) ->
 prioritise_info({'DOWN', _MRef, process, _Pid, _Info}, _Len, _State) -> 1;
 prioritise_info(_, _Len, _State)                                     -> 0.
 
+consumer_call_reply(Return, State) ->
+    case Return of
+        {ok, NewMState} ->
+            {reply, ok, State#state{module_state = NewMState}};
+        {error, Reason, NewMState} ->
+            {stop, {error, Reason}, {error, Reason},
+             State#state{module_state = NewMState}}
+    end.
+
 handle_call({consumer_call, Msg}, From,
             State = #state{module       = ConsumerModule,
                            module_state = MState}) ->
@@ -240,13 +265,14 @@ handle_call({consumer_call, Method, Args}, _From,
             #'basic.deliver'{} ->
                 ConsumerModule:handle_deliver(Method, Args, MState)
         end,
-    case Return of
-        {ok, NewMState} ->
-            {reply, ok, State#state{module_state = NewMState}};
-        {error, Reason, NewMState} ->
-            {stop, {error, Reason}, {error, Reason},
-             State#state{module_state = NewMState}}
-    end.
+    consumer_call_reply(Return, State);
+
+%% only supposed to be used with basic.deliver
+handle_call({consumer_call, Method = #'basic.deliver'{}, Args, DeliveryCtx}, _From,
+            State = #state{module       = ConsumerModule,
+                           module_state = MState}) ->
+    Return = ConsumerModule:handle_deliver(Method, Args, DeliveryCtx, MState),
+    consumer_call_reply(Return, State).
 
 handle_cast(_What, State) ->
     {noreply, State}.
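The handle_deliver/4 callback documented above mirrors handle_deliver/3 but passes the delivery context through to the consumer. A minimal sketch of an implementing clause, assuming the consumer keeps the owning process pid as its state and simply forwards the delivery (module layout and names are illustrative; the other amqp_gen_consumer callbacks are omitted):

    -include_lib("amqp_client/include/amqp_client.hrl").

    %% Only called for channels on a direct connection with manual
    %% flow control; forwards the delivery and its context, much like
    %% the forwarding clause at the top of this diff.
    handle_deliver(Deliver = #'basic.deliver'{}, Msg = #amqp_msg{},
                   DeliveryCtx, OwnerPid) ->
        OwnerPid ! {Deliver, Msg, DeliveryCtx},
        {ok, OwnerPid}.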
index 258f73c223371d5405e7e5c4d53d52a4796d8f44..b8e4ff962be0928af1900078f938d9061343baeb 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index e18841b8b866b8f0dd495a4da2077d423b13f856..5edb44b4587303e7ccd2d38c0ba91b91a76e3e24 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
@@ -132,17 +132,23 @@ do_connect({Addr, Family},
         {error, _} = E -> E
     end;
 do_connect({Addr, Family},
-           AmqpParams = #amqp_params_network{ssl_options        = SslOpts,
+           AmqpParams = #amqp_params_network{ssl_options        = SslOpts0,
                                              port               = Port,
                                              connection_timeout = Timeout,
                                              socket_options     = ExtraOpts},
            SIF, State) ->
+    {ok, GlobalSslOpts} = application:get_env(amqp_client, ssl_options),
     app_utils:start_applications([asn1, crypto, public_key, ssl]),
     obtain(),
     case gen_tcp:connect(Addr, Port,
                          [Family | ?RABBIT_TCP_OPTS] ++ ExtraOpts,
                          Timeout) of
         {ok, Sock} ->
+            SslOpts = rabbit_networking:fix_ssl_options(
+                        orddict:to_list(
+                          orddict:merge(fun (_, _A, B) -> B end,
+                                        orddict:from_list(GlobalSslOpts),
+                                        orddict:from_list(SslOpts0)))),
             case ssl:connect(Sock, SslOpts) of
                 {ok, SslSock} ->
                     RabbitSslSock = #ssl_socket{ssl = SslSock, tcp = Sock},
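The merge above gives per-connection ssl_options precedence over the amqp_client application's global ssl_options. A small sketch of the same orddict merge, with illustrative option lists:

    %% Illustrative values; the merge fun keeps the second
    %% (per-connection) value whenever a key appears in both lists.
    GlobalSslOpts = [{verify, verify_peer}, {cacertfile, "ca.pem"}],
    SslOpts0      = [{verify, verify_none}],
    Merged = orddict:to_list(
               orddict:merge(fun (_K, _Global, PerConn) -> PerConn end,
                             orddict:from_list(GlobalSslOpts),
                             orddict:from_list(SslOpts0))).
    %% Merged =:= [{cacertfile, "ca.pem"}, {verify, verify_none}]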
@@ -298,7 +304,7 @@ client_properties(UserProperties) ->
                {<<"version">>,   longstr, list_to_binary(Vsn)},
                {<<"platform">>,  longstr, <<"Erlang">>},
                {<<"copyright">>, longstr,
-                <<"Copyright (c) 2007-2014 GoPivotal, Inc.">>},
+                <<"Copyright (c) 2007-2015 Pivotal Software, Inc.">>},
                {<<"information">>, longstr,
                 <<"Licensed under the MPL.  "
                   "See http://www.rabbitmq.com/">>},
index a192b6b108200006a9ae583bbb7159f99bd027bf..c5bed0d37d629910a1b2d05bb484d9b931a1d732 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @doc This module allows the simple execution of an asynchronous RPC over
@@ -88,7 +88,8 @@ call(RpcClient, Payload) ->
 %% Sets up a reply queue for this client to listen on
 setup_reply_queue(State = #state{channel = Channel}) ->
     #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Channel, #'queue.declare'{}),
+        amqp_channel:call(Channel, #'queue.declare'{exclusive   = true,
+                                                    auto_delete = true}),
     State#state{reply_queue = Q}.
 
 %% Registers this RPC client instance as a consumer to handle rpc responses
index 35e28a96d101a1bafb478b870254f1c9399fca23..14525362d7252994b982854a7e47efdbc5104886 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @doc This is a utility module that is used to expose an arbitrary function
index 76b9de49acc1399dc53f82a242adc8b8144198e5..dc916acbd53c5343c268f0fafab49e7d9b88085e 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @doc This module is an implementation of the amqp_gen_consumer
@@ -45,7 +45,8 @@
 
 -export([register_default_consumer/2]).
 -export([init/1, handle_consume_ok/3, handle_consume/3, handle_cancel_ok/3,
-         handle_cancel/2, handle_server_cancel/2, handle_deliver/3,
+         handle_cancel/2, handle_server_cancel/2,
+         handle_deliver/3, handle_deliver/4,
          handle_info/2, handle_call/3, terminate/2]).
 
 -record(state, {consumers             = dict:new(), %% Tag -> ConsumerPid
@@ -154,8 +155,13 @@ handle_server_cancel(Cancel = #'basic.cancel'{nowait = true}, State) ->
     {ok, State1}.
 
 %% @private
-handle_deliver(Deliver, Message, State) ->
-    deliver(Deliver, Message, State),
+handle_deliver(Method, Message, State) ->
+    deliver(Method, Message, State),
+    {ok, State}.
+
+%% @private
+handle_deliver(Method, Message, DeliveryCtx, State) ->
+    deliver(Method, Message, DeliveryCtx, State),
     {ok, State}.
 
 %% @private
@@ -201,18 +207,26 @@ terminate(_Reason, State) ->
 %% Internal plumbing
 %%---------------------------------------------------------------------------
 
-deliver(Msg, State) ->
-    deliver(Msg, undefined, State).
-deliver(Msg, Message, State) ->
-    Combined = if Message =:= undefined -> Msg;
-                  true                  -> {Msg, Message}
-               end,
-    case resolve_consumer(tag(Msg), State) of
-        {consumer, Pid} -> Pid ! Combined;
-        {default, Pid}  -> Pid ! Combined;
+deliver_to_consumer_or_die(Method, Msg, State) ->
+    case resolve_consumer(tag(Method), State) of
+        {consumer, Pid} -> Pid ! Msg;
+        {default, Pid}  -> Pid ! Msg;
         error           -> exit(unexpected_delivery_and_no_default_consumer)
     end.
 
+deliver(Method, State) ->
+    deliver(Method, undefined, State).
+deliver(Method, Message, State) ->
+    Combined = if Message =:= undefined -> Method;
+                  true                  -> {Method, Message}
+               end,
+    deliver_to_consumer_or_die(Method, Combined, State).
+deliver(Method, Message, DeliveryCtx, State) ->
+    Combined = if Message =:= undefined -> Method;
+                  true                  -> {Method, Message, DeliveryCtx}
+               end,
+    deliver_to_consumer_or_die(Method, Combined, State).
+
 do_cancel(Cancel, State = #state{consumers = Consumers,
                                  monitors  = Monitors}) ->
     Tag = tag(Cancel),
index 984e9bb939a25ea369b12fc6a16525069f356ba3..9c928d55641259978a210d7655db14d9f0340841 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% @private
index 63609d9033c7e03a673034f72b095bdb652a66ca..04446b02265ed1c5fd3886cb64bd0a3677c0d67b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(amqp_uri).
index 55af2d69b34f05150f29969cdf3feb6c35df0334..7daa7287bfaa44a03c7bb197e094332eb20d713b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2013-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2013-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_routing_util).
@@ -79,8 +79,8 @@ parse_endpoint0(Type,     Rest,               _) ->
 
 %% --------------------------------------------------------------------------
 
-ensure_endpoint(Dir, Channel, EndPoint, State) ->
-    ensure_endpoint(Dir, Channel, EndPoint, [], State).
+ensure_endpoint(Dir, Channel, Endpoint, State) ->
+    ensure_endpoint(Dir, Channel, Endpoint, [], State).
 
 ensure_endpoint(source, Channel, {exchange, {Name, _}}, Params, State) ->
     check_exchange(Name, Channel,
@@ -106,7 +106,10 @@ ensure_endpoint(_, Channel, {queue, Name}, Params, State) ->
                                     #'queue.declare'{queue  = Queue,
                                                      nowait = true},
                                     queue, Params1),
-                         amqp_channel:cast(Channel, Method),
+                         case Method#'queue.declare'.nowait of
+                             true  -> amqp_channel:cast(Channel, Method);
+                             false -> amqp_channel:call(Channel, Method)
+                         end,
                          sets:add_element(Queue, State)
              end,
     {ok, Queue, State1};
@@ -119,9 +122,11 @@ ensure_endpoint(dest, Channel, {exchange, {Name, _}}, Params, State) ->
 ensure_endpoint(dest, _Ch, {topic, _}, _Params, State) ->
     {ok, undefined, State};
 
-ensure_endpoint(_, _Ch, {Type, Name}, _Params, State)
-  when Type =:= reply_queue orelse Type =:= amqqueue ->
-    {ok, list_to_binary(Name), State};
+ensure_endpoint(_, _Ch, {amqqueue, Name}, _Params, State) ->
+  {ok, list_to_binary(Name), State};
+
+ensure_endpoint(_, _Ch, {reply_queue, Name}, _Params, State) ->
+  {ok, list_to_binary(Name), State};
 
 ensure_endpoint(_Direction, _Ch, _Endpoint, _Params, _State) ->
     {error, invalid_endpoint}.
@@ -167,17 +172,50 @@ check_exchange(ExchangeName, Channel, true) ->
     #'exchange.declare_ok'{} = amqp_channel:call(Channel, XDecl),
     ok.
 
+update_queue_declare_arguments(Method, Params) ->
+    Method#'queue.declare'{arguments =
+                               proplists:get_value(arguments, Params, [])}.
+
+update_queue_declare_exclusive(Method, Params) ->
+    case proplists:get_value(exclusive, Params) of
+        undefined -> Method;
+        Val       -> Method#'queue.declare'{exclusive = Val}
+    end.
+
+update_queue_declare_auto_delete(Method, Params) ->
+    case proplists:get_value(auto_delete, Params) of
+        undefined -> Method;
+        Val       -> Method#'queue.declare'{auto_delete = Val}
+    end.
+
+update_queue_declare_nowait(Method, Params) ->
+    case proplists:get_value(nowait, Params) of
+        undefined -> Method;
+        Val       -> Method#'queue.declare'{nowait = Val}
+    end.
+
 queue_declare_method(#'queue.declare'{} = Method, Type, Params) ->
+    %% defaults
     Method1 = case proplists:get_value(durable, Params, false) of
                   true  -> Method#'queue.declare'{durable     = true};
                   false -> Method#'queue.declare'{auto_delete = true,
                                                   exclusive   = true}
               end,
+    %% set the rest of queue.declare fields from Params
+    Method2 = lists:foldl(fun (F, Acc) -> F(Acc, Params) end,
+                Method1, [fun update_queue_declare_arguments/2,
+                          fun update_queue_declare_exclusive/2,
+                          fun update_queue_declare_auto_delete/2,
+                          fun update_queue_declare_nowait/2]),
     case  {Type, proplists:get_value(subscription_queue_name_gen, Params)} of
         {topic, SQNG} when is_function(SQNG) ->
-            Method1#'queue.declare'{queue = SQNG()};
+            Method2#'queue.declare'{queue = SQNG()};
+        {exchange, SQNG} when is_function(SQNG) ->
+            Method2#'queue.declare'{queue = SQNG()};
+        {'reply-queue', SQNG} when is_function(SQNG) ->
+            Method2#'queue.declare'{queue = SQNG()};
         _ ->
-            Method1
+            Method2
     end.
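A sketch of how the fold above applies caller-supplied parameters; the Params list below is illustrative and assumes evaluation inside this module:

    Params = [{durable, true},
              {arguments, [{<<"x-expires">>, long, 60000}]},
              {nowait, false}],
    Method = queue_declare_method(#'queue.declare'{queue = <<"my-queue">>},
                                  queue, Params).
    %% Method#'queue.declare'.durable   =:= true
    %% Method#'queue.declare'.arguments =:= [{<<"x-expires">>, long, 60000}]
    %% Method#'queue.declare'.nowait    =:= false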
 
 %% --------------------------------------------------------------------------
@@ -193,4 +231,3 @@ unescape(Str) -> unescape(Str, []).
 unescape("%2F" ++ Str, Acc) -> unescape(Str, [$/ | Acc]);
 unescape([C | Str],    Acc) -> unescape(Str, [C | Acc]);
 unescape([],           Acc) -> lists:reverse(Acc).
-
index e36d21a190fa179dd2a4b37ab393924688c3f700..ae3057fe1dc4c54adcf132aee8535afc8bd7d018 100644 (file)
@@ -11,7 +11,7 @@
 # The Original Code is RabbitMQ.
 #
 # The Initial Developer of the Original Code is GoPivotal, Inc.
-# Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+# Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 #
 
 IS_SUCCESS:=egrep "(All .+ tests (successful|passed).|Test passed.)"
index 5b55b5129a1011cd33ee33203ca301efd4fcbb9b..beef64cad8082615a475d55fa30dcc57a63ccb3e 100644 (file)
@@ -11,7 +11,7 @@
 # The Original Code is RabbitMQ.
 #
 # The Initial Developer of the Original Code is GoPivotal, Inc.
-# Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+# Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 #
 
 TEST_SOURCES=$(wildcard *.erl)
index 13958bba4ea3f6480b2a3789c2133c23a9cd0e93..e0bef04055706f878e661cf2e987ac5e6b898ef2 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(amqp_client_SUITE).
@@ -76,13 +76,14 @@ subscribe_nowait_test_()                -> ?RUN([]).
 connection_blocked_network_test_()      -> ?RUN([]).
 
 non_existent_exchange_test_()           -> ?RUN([negative]).
-bogus_rpc_test_()                    -> ?RUN([negative, repeat]).
-hard_error_test_()                   -> ?RUN([negative, repeat]).
+bogus_rpc_test_()                       -> ?RUN([negative, repeat]).
+hard_error_test_()                      -> ?RUN([negative, repeat]).
 non_existent_user_test_()               -> ?RUN([negative]).
 invalid_password_test_()                -> ?RUN([negative]).
 non_existent_vhost_test_()              -> ?RUN([negative]).
 no_permission_test_()                   -> ?RUN([negative]).
 channel_writer_death_test_()            -> ?RUN([negative]).
+connection_failure_test_()              -> ?RUN([negative]).
 channel_death_test_()                   -> ?RUN([negative]).
 shortstr_overflow_property_test_()      -> ?RUN([negative]).
 shortstr_overflow_field_test_()         -> ?RUN([negative]).
index f6b5cb0e6ea8d1fe78a4ccf07983dd993da5cfd8..cb20555ae19e582ab9efe1f3112082f197f95171 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(amqp_dbg).
index cd5910d40bc842811dc888c6e274b0bce50dc9ec..a4f962cb8289ce32b2933e870919b43df3aa0fba 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(negative_test_util).
@@ -80,11 +80,33 @@ hard_error_test() ->
     test_util:wait_for_death(Channel),
     test_util:wait_for_death(Connection).
 
+%% The connection should die if the underlying connection is prematurely
+%% closed. For a network connection, this means that the TCP socket is
+%% closed. For a direct connection (remotely only, of course), this means that
+%% the RabbitMQ node appears to be down.
+connection_failure_test() ->
+    {ok, Connection} = test_util:new_connection(),
+    case amqp_connection:info(Connection, [type, amqp_params]) of
+        [{type, direct}, {amqp_params, Params}]  ->
+            case Params#amqp_params_direct.node of
+                N when N == node() ->
+                    amqp_connection:close(Connection);
+                N ->
+                    true = erlang:disconnect_node(N),
+                    net_adm:ping(N)
+            end;
+        [{type, network}, {amqp_params, _}] ->
+            [{sock, Sock}] = amqp_connection:info(Connection, [sock]),
+            ok = gen_tcp:close(Sock)
+    end,
+    test_util:wait_for_death(Connection),
+    ok.
+
 %% An error in a channel should result in the death of the entire connection.
 %% The death of the channel is caused by an error in generating the frames
-%% (writer dies) - only in the network case
+%% (writer dies)
 channel_writer_death_test() ->
-    {ok, Connection} = test_util:new_connection(just_network),
+    {ok, Connection} = test_util:new_connection(),
     {ok, Channel} = amqp_connection:open_channel(Connection),
     Publish = #'basic.publish'{routing_key = <<>>, exchange = <<>>},
     QoS = #'basic.qos'{prefetch_count = 0},
index 121a40056dc38c025b879dffd308d27772697a7e..b6744236377a9545f491d50bcb2925280e05ed42 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(test_util).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-federation-management/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-federation-management/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 5d3e225fccbed83697d0eb7140f77e179ecb061f..7110ea260fcf464d09d78c7eb7ddd768b040acd6 100644 (file)
@@ -66,5 +66,5 @@ HELP['federation-trust-user-id'] =
     'Set "Yes" to preserve the "user-id" field across a federation link, even if the user-id does not match that used to republish the message. Set to "No" to clear the "user-id" field when messages are federated. Only set this to "Yes" if you trust the upstream broker not to forge user-ids.';
 
 function link_fed_conn(vhost, name) {
-    return _link_to(fmt_escape_html(name), '#/federation-upstreams/' + esc(vhost) + '/' + esc(name));
+    return _link_to(name, '#/federation-upstreams/' + esc(vhost) + '/' + esc(name));
 }
index 4271aad761acaf742426d33d80308e2d1126a25c..6e66b4e336c3377b5d7c67212ec77393b4998686 100644 (file)
@@ -1,13 +1,9 @@
-<h1>Federation Upstream: <b><%= fmt_string(upstream.name) %></b></h1>
+<h1>Federation Upstream: <b><%= fmt_string(upstream.name) %></b><%= fmt_maybe_vhost(upstream.vhost) %></h1>
 
 <div class="section">
   <h2>Overview</h2>
   <div class="hider">
     <table class="facts">
-      <tr>
-        <th>Virtual Host</th>
-        <td><%= fmt_string(upstream.vhost) %></td>
-      </tr>
       <tr>
         <th>URI</th>
         <td><%= fmt_string(upstream.value.uri) %></td>
index 6fe39907c8b2d8c6622db0c33b2e3b46d1d270d9..15910c968a9ee718da2d12eec03a3ac03d33022e 100644 (file)
@@ -74,7 +74,7 @@
     </td>
     <td class="r">
       <% if (link.local_channel) { %>
-        <%= fmt_rate(link.local_channel.message_stats, 'confirm') %>
+        <%= fmt_detail_rate(link.local_channel.message_stats, 'confirm') %>
       <% } %>
     </td>
     <td><%= link.timestamp %></td>
index 818f3a938ad0efff73df4d434d12f53320fbbf95..10dde014bf4752ec1fcd627e4239b83292544aa5 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_mgmt).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-federation/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-federation/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 66c8a8a9ae0738d7da8f1c79dc4b1044a832b7a1..0995cfd68cf8179ea446f63db543e5b40cb1eb91 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -record(upstream, {uris,
@@ -25,7 +25,8 @@
                    trust_user_id,
                    ack_mode,
                    ha_policy,
-                   name}).
+                   name,
+                   bind_nowait}).
 
 -record(upstream_params,
         {uri,
index d4bdc29eadc90d6104ab4faaa4d62166d4fbf3ab..119ef60a33f979f76b1130709dfe2bd3a6699559 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_app).
index 7faf3eb5137661ea18aecd28e31145ecf3e364eb..d00f9914dc68013411396a9b8fb94a085848d818 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_db).
index 856bd1b3c12bbc3465592f9fba509bb0f029270b..677d5f218cca422142464fe86fa21d95169c9f44 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_event).
@@ -19,7 +19,7 @@
 
 -include_lib("rabbit_common/include/rabbit.hrl").
 
--export([add_handler/0]).
+-export([add_handler/0, remove_handler/0]).
 
 -export([init/1, handle_call/2, handle_event/2, handle_info/2,
          terminate/2, code_change/3]).
@@ -31,6 +31,9 @@
 add_handler() ->
     gen_event:add_handler(rabbit_event, ?MODULE, []).
 
+remove_handler() ->
+    gen_event:delete_handler(rabbit_event, ?MODULE, []).
+
 init([]) ->
     {ok, []}.
 
index 69bce12c3fe34f1204c3ef27c7bf743484d57c7f..fa6102ca21641e6b1ab0a056822c8394107f7829 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% TODO rename this
 
 -rabbit_boot_step({?MODULE,
                    [{description, "federation exchange decorator"},
-                    {mfa, {rabbit_registry, register,
-                           [exchange_decorator, <<"federation">>, ?MODULE]}},
+                    {mfa, {rabbit_exchange_decorator, register,
+                           [<<"federation">>, ?MODULE]}},
                     {requires, rabbit_registry},
+                    {cleanup, {rabbit_exchange_decorator, unregister,
+                               [<<"federation">>]}},
                     {enables, recovery}]}).
 
 -include_lib("amqp_client/include/amqp_client.hrl").
index 27300450d936c850ba2ce13a21841d393d9fc6ca..12f53168af2dea14ddff8bec5fcec3a747c03f36 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_exchange_link).
@@ -274,24 +274,28 @@ binding_op(UpdateFun, Cmd, B = #binding{args = Args},
 
 bind_cmd(Type, #binding{key = Key, args = Args},
          State = #state{internal_exchange = IntXNameBin,
-                        upstream_params   = UpstreamParams}) ->
+                        upstream_params   = UpstreamParams,
+                        upstream          = Upstream}) ->
     #upstream_params{x_or_q = X} = UpstreamParams,
+    #upstream{bind_nowait = Nowait} = Upstream,
     case update_binding(Args, State) of
         ignore  -> ignore;
-        NewArgs -> bind_cmd0(Type, name(X), IntXNameBin, Key, NewArgs)
+        NewArgs -> bind_cmd0(Type, name(X), IntXNameBin, Key, NewArgs, Nowait)
     end.
 
-bind_cmd0(bind, Source, Destination, RoutingKey, Arguments) ->
+bind_cmd0(bind, Source, Destination, RoutingKey, Arguments, Nowait) ->
     #'exchange.bind'{source      = Source,
                      destination = Destination,
                      routing_key = RoutingKey,
-                     arguments   = Arguments};
+                     arguments   = Arguments,
+                     nowait      = Nowait};
 
-bind_cmd0(unbind, Source, Destination, RoutingKey, Arguments) ->
+bind_cmd0(unbind, Source, Destination, RoutingKey, Arguments, Nowait) ->
     #'exchange.unbind'{source      = Source,
                        destination = Destination,
                        routing_key = RoutingKey,
-                       arguments   = Arguments}.
+                       arguments   = Arguments,
+                       nowait      = Nowait}.
 
 %% This function adds information about the current node to the
 %% binding arguments, or returns 'ignore' if it determines the binding
index 239fcbf9f813c36dd927b4c01cbf5bbdf97078be..529edea1134465473c2e203743acdfbe0a677813 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_exchange_link_sup_sup).
@@ -62,4 +62,5 @@ init([]) ->
     {ok, {{one_for_one, 3, 10}, []}}.
 
 %% See comment in rabbit_federation_queue_link_sup_sup:id/1
-id(X = #exchange{}) -> X#exchange{scratches = none}.
+id(X = #exchange{policy = Policy}) -> X1 = rabbit_exchange:immutable(X),
+                                      X1#exchange{policy = Policy}.
index dd5f9ceca3274b613a65fd2db6b1b82a1e739548..2999a189b87798a7338c4a035088c8239135163b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_link_sup).
index d88f09e8cd05f412e75060a009f77cdaa42451dd..757331f67a58525164c2bb711f001af332ef4902 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_link_util).
index 2385bc02c0a47bc712cf8b4b438836afba99e91e..c05f4c07e28e149bdd01de2462705796b81b8760 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_parameters).
 -include_lib("rabbit_common/include/rabbit.hrl").
 
 -export([validate/5, notify/4, notify_clear/3]).
--export([register/0, validate_policy/1, adjust/1]).
+-export([register/0, unregister/0, validate_policy/1, adjust/1]).
+
+-define(RUNTIME_PARAMETERS,
+        [{runtime_parameter, <<"federation">>},
+         {runtime_parameter, <<"federation-upstream">>},
+         {runtime_parameter, <<"federation-upstream-set">>},
+         {policy_validator,  <<"federation-upstream">>},
+         {policy_validator,  <<"federation-upstream-set">>}]).
 
 -rabbit_boot_step({?MODULE,
                    [{description, "federation parameters"},
                     {mfa, {rabbit_federation_parameters, register, []}},
                     {requires, rabbit_registry},
+                    {cleanup, {rabbit_federation_parameters, unregister, []}},
                     {enables, recovery}]}).
 
 register() ->
     [rabbit_registry:register(Class, Name, ?MODULE) ||
-        {Class, Name} <- [{runtime_parameter, <<"federation">>},
-                          {runtime_parameter, <<"federation-upstream">>},
-                          {runtime_parameter, <<"federation-upstream-set">>},
-                          {policy_validator,  <<"federation-upstream">>},
-                          {policy_validator,  <<"federation-upstream-set">>}]],
+        {Class, Name} <- ?RUNTIME_PARAMETERS],
+    ok.
+
+unregister() ->
+    [rabbit_registry:unregister(Class, Name) ||
+        {Class, Name} <- ?RUNTIME_PARAMETERS],
     ok.
 
 validate(_VHost, <<"federation-upstream-set">>, Name, Term, _User) ->
@@ -82,7 +91,8 @@ shared_validation() ->
      {<<"trust-user-id">>,  fun rabbit_parameter_validation:boolean/2, optional},
      {<<"ack-mode">>,       rabbit_parameter_validation:enum(
                               ['no-ack', 'on-publish', 'on-confirm']), optional},
-     {<<"ha-policy">>,      fun rabbit_parameter_validation:binary/2, optional}].
+     {<<"ha-policy">>,      fun rabbit_parameter_validation:binary/2, optional},
+     {<<"bind-nowait">>,    fun rabbit_parameter_validation:boolean/2, optional}].
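With the extra validator above, a federation upstream definition may carry a boolean bind-nowait flag; when it is true, the exchange.bind/unbind commands issued on the link are sent with nowait set (see the bind_cmd0/6 change earlier in this diff). An illustrative value, roughly as the validator sees it after the runtime parameter is decoded (the URI is made up):

    Upstream = [{<<"uri">>,         <<"amqp://upstream-host">>},
                {<<"ack-mode">>,    <<"on-confirm">>},
                {<<"bind-nowait">>, true}].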
 
 validate_uri(Name, Term) when is_binary(Term) ->
     case rabbit_parameter_validation:binary(Name, Term) of
index 7e70e0ff86797dc3200e592139d4d567cc8d63d6..49c4f40207007531c26d027905339b61be299818 100644 (file)
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_queue).
 
 -rabbit_boot_step({?MODULE,
                    [{description, "federation queue decorator"},
-                    {mfa, {rabbit_registry, register,
-                           [queue_decorator, <<"federation">>, ?MODULE]}},
+                    {mfa, {rabbit_queue_decorator, register,
+                           [<<"federation">>, ?MODULE]}},
                     {requires, rabbit_registry},
+                    {cleanup, {rabbit_queue_decorator, unregister,
+                               [<<"federation">>]}},
                     {enables, recovery}]}).
 
 -include_lib("amqp_client/include/amqp_client.hrl").
index 2daaee23350ec21a4f1c36d13cb26d14caf6cfca..4dd7810c01385e71ffd46dd8fb10b1aa20cbf3dd 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_queue_link).
index ee1a8fea8a4b0bbac759bc2caba14762f752ba70..9c6a70344785d1be1803a129b5617076092e95b2 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_queue_link_sup_sup).
@@ -59,16 +59,14 @@ stop_child(Q) ->
 init([]) ->
     {ok, {{one_for_one, 3, 10}, []}}.
 
-%% Clean out all transient aspects of the queue. We need to keep the
-%% entire queue around rather than just take its name since we will
-%% want to know its policy to determine how to federate it, and its
-%% immutable properties in case we want to redeclare it upstream. We
-%% don't just take its name and look it up again since that would
-%% introduce race conditions when policies change frequently.  Note
-%% that since we take down all the links and start again when policies
-%% change, the policy will always be correct, so we don't clear it out
-%% here and can trust it.
-id(Q = #amqqueue{}) -> Q#amqqueue{pid             = none,
-                                  slave_pids      = none,
-                                  sync_slave_pids = none,
-                                  gm_pids         = none}.
+%% Clean out all mutable aspects of the queue except policy. We need
+%% to keep the entire queue around rather than just take its name
+%% since we will want to know its policy to determine how to federate
+%% it, and its immutable properties in case we want to redeclare it
+%% upstream. We don't just take its name and look it up again since
+%% that would introduce race conditions when policies change
+%% frequently.  Note that since we take down all the links and start
+%% again when policies change, the policy will always be correct, so
+%% we don't clear it out here and can trust it.
+id(Q = #amqqueue{policy = Policy}) -> Q1 = rabbit_amqqueue:immutable(Q),
+                                      Q1#amqqueue{policy = Policy}.
index 7fd7e7e67e84214c8215e7027189631f90d6e76d..59dc79e15e2be31195ed9018429438643e036c5b 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ Federation.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_status).
index 390b1156a33426824257938b6ec949c398392c43..52a837d0f1f7a93062924a2b9725a737ab90b18e 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_sup).
@@ -23,7 +23,7 @@
 -include_lib("rabbit_common/include/rabbit.hrl").
 -define(SUPERVISOR, rabbit_federation_sup).
 
--export([start_link/0]).
+-export([start_link/0, stop/0]).
 
 -export([init/1]).
 
@@ -35,7 +35,9 @@
                    [{description, "federation"},
                     {mfa,         {rabbit_sup, start_child, [?MODULE]}},
                     {requires,    kernel_ready},
-                    {enables,     rabbit_federation_exchange}]}).
+                    {cleanup,     {?MODULE, stop, []}},
+                    {enables,     rabbit_federation_exchange},
+                    {enables,     rabbit_federation_queue}]}).
 
 %%----------------------------------------------------------------------------
 
@@ -44,6 +46,11 @@ start_link() ->
     rabbit_federation_event:add_handler(),
     R.
 
+stop() ->
+    rabbit_federation_event:remove_handler(),
+    ok = supervisor:terminate_child(rabbit_sup, ?MODULE),
+    ok = supervisor:delete_child(rabbit_sup, ?MODULE).
+
 %%----------------------------------------------------------------------------
 
 init([]) ->
index 168fce0fdec03f1ede1fdc689575a9451cce34a3..398dbcf6cba8d4d907747f4f21aff65c93791cce 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_upstream).
@@ -131,7 +131,8 @@ from_upstream_or_set(US, Name, U, XorQ) ->
                                   binary_to_list(
                                     bget('ack-mode', US, U, <<"on-confirm">>))),
               ha_policy       = bget('ha-policy',       US, U, none),
-              name            = Name}.
+              name            = Name,
+              bind_nowait     = bget('bind-nowait',     US, U, false)}.
 
 %%----------------------------------------------------------------------------
 
index e84cb2990fb3d6955a7e754013988936da01f2d3..920bc9fea2301932b60e349f4dfc0aa10b634014 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_upstream_exchange).
@@ -21,6 +21,8 @@
                     {mfa, {rabbit_registry, register,
                            [exchange, <<"x-federation-upstream">>, ?MODULE]}},
                     {requires, rabbit_registry},
+                    {cleanup, {rabbit_registry, unregister,
+                               [exchange, <<"x-federation-upstream">>]}},
                     {enables, recovery}]}).
 
 -include_lib("rabbit_common/include/rabbit.hrl").
index b6a1cfd8261610b97ac179a5d94079f86b0742b6..33e903e281191e4581e34508744d51468b4ac65a 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_util).
index 96913df327ceb598c712bb425ff1b0e0baf7b619..cce16f856c75aa36698e7a4e5e7e2f86949a0501 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_exchange_test).
@@ -23,6 +23,7 @@
 
 -import(rabbit_misc, [pget/2]).
 -import(rabbit_federation_util, [name/1]).
+-import(rabbit_test_util, [enable_plugin/2, disable_plugin/2]).
 
 -import(rabbit_federation_test_util,
         [expect/3, expect_empty/2,
@@ -566,6 +567,45 @@ federate_unfederate_test() ->
               assert_connections(Xs, [])
       end, [x(<<"dyn.exch1">>), x(<<"dyn.exch2">>)]).
 
+dynamic_plugin_stop_start_test() ->
+    Cfg = single_cfg(),
+    X1 = <<"dyn.exch1">>,
+    X2 = <<"dyn.exch2">>,
+    with_ch(
+      fun (Ch) ->
+              set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+
+              %% Declare federated exchange - get link
+              assert_connections([X1], [<<"localhost">>]),
+
+              %% Disable plugin, link goes
+              ok = disable_plugin(Cfg, "rabbitmq_federation"),
+              %% We can't check with status for obvious reasons...
+              undefined = whereis(rabbit_federation_sup),
+              {error, not_found} = rabbit_registry:lookup_module(
+                                     exchange, 'x-federation-upstream'),
+
+              %% Create exchange then re-enable plugin, links appear
+              declare_exchange(Ch, x(X2)),
+              ok = enable_plugin(Cfg, "rabbitmq_federation"),
+              assert_connections([X1, X2], [<<"localhost">>]),
+              {ok, _} = rabbit_registry:lookup_module(
+                          exchange, 'x-federation-upstream'),
+
+              %% Test both exchanges work. They are just federated to
+              %% themselves so should duplicate messages.
+              [begin
+                   Q = bind_queue(Ch, X, <<"key">>),
+                   await_binding(Cfg, X, <<"key">>, 2),
+                   publish(Ch, X, <<"key">>, <<"HELLO">>),
+                   expect(Ch, Q, [<<"HELLO">>, <<"HELLO">>]),
+                   delete_queue(Ch, Q)
+               end || X <- [X1, X2]],
+
+              clear_policy(Cfg, <<"dyn">>),
+              assert_connections([X1, X2], [])
+      end, [x(X1)]).
+
 %%----------------------------------------------------------------------------
 
 with_ch(Fun, Xs) ->
index 5d521d2a6afbb08047b84188d3937c4784429add..d58c0d64501b44f7a4eaa368f088dbfecb5ac498 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_queue_test).
@@ -23,6 +23,7 @@
 
 -import(rabbit_misc, [pget/2]).
 -import(rabbit_federation_util, [name/1]).
+-import(rabbit_test_util, [enable_plugin/2, disable_plugin/2]).
 
 -import(rabbit_federation_test_util,
         [expect/3,
@@ -119,6 +120,32 @@ federate_unfederate_test() ->
             q(<<"upstream2">>),
             q(<<"downstream">>)]).
 
+dynamic_plugin_stop_start_test() ->
+    Cfg = single_cfg(),
+    Q1 = <<"dyn.q1">>,
+    Q2 = <<"dyn.q2">>,
+    U = <<"upstream">>,
+    with_ch(
+      fun (Ch) ->
+              set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, U),
+              %% Declare federated queue - get link
+              expect_federation(Ch, U, Q1),
+
+              %% Disable plugin, link goes
+              ok = disable_plugin(Cfg, "rabbitmq_federation"),
+              expect_no_federation(Ch, U, Q1),
+
+              %% Create exchange then re-enable plugin, links appear
+              declare_queue(Ch, q(Q2)),
+              ok = enable_plugin(Cfg, "rabbitmq_federation"),
+              expect_federation(Ch, U, Q1),
+              expect_federation(Ch, U, Q2),
+
+              clear_policy(Cfg, <<"dyn">>),
+              expect_no_federation(Ch, U, Q1),
+              expect_no_federation(Ch, U, Q2),
+              delete_queue(Ch, Q2)
+      end, [q(Q1), q(U)]).
 
 %% Downstream: rabbit-test, port 5672
 %% Upstream:   hare,        port 5673
index 87a584e340f19f8da4d273b80d4ba2c7180a0a32..d70042e3a84ce46f740ab16f11056d7f343161a9 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_test_util).
index 3a1cf8bc8479c9a71360e11b1980c09bc86f48ff..76d23b80336498291da962482b5631d632e5efe6 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_federation_unit_test).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management-agent/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-management-agent/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 2b47cfd9b36980c3fda639ea4a867f86f0df72ff..6220ac671258538e28778aa89c92777e85e59266 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_agent_app).
 -behaviour(application).
 -export([start/2, stop/1]).
 
-%% Make sure our database is hooked in *before* listening on the network or
-%% recovering queues (i.e. so there can't be any events fired before it starts).
--rabbit_boot_step({rabbit_mgmt_db_handler,
-                   [{description, "management agent"},
-                    {mfa,         {rabbit_mgmt_db_handler, add_handler,
-                                   []}},
-                    {requires,    rabbit_event},
-                    {enables,     recovery}]}).
-
-
 start(_Type, _StartArgs) ->
     rabbit_mgmt_agent_sup:start_link().
 
index 647b99fbc1ac4eae7da1ba66e1c5c99e9f7ac7e7..cd0635fbe49ce669400a0220ab246a76d37d3c5f 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_agent_sup).
index a3012bbddbc3afe886089cff61bd7f6f2f09bc6d..5bd9bc01d15dc56d1ce4229aaa8a5f8a854a524a 100644 (file)
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_db_handler).
 
+%% Make sure our database is hooked in *before* listening on the network or
+%% recovering queues (i.e. so there can't be any events fired before it starts).
+-rabbit_boot_step({rabbit_mgmt_db_handler,
+                   [{description, "management agent"},
+                    {mfa,         {?MODULE, add_handler, []}},
+                    {cleanup,     {gen_event, delete_handler,
+                                   [rabbit_event, ?MODULE, []]}},
+                    {requires,    rabbit_event},
+                    {enables,     recovery}]}).
+
 -behaviour(gen_event).
 
--export([add_handler/0, gc/0]).
+-export([add_handler/0, gc/0, rates_mode/0]).
 
 -export([init/1, handle_call/2, handle_event/2, handle_info/2,
          terminate/2, code_change/3]).
 
 add_handler() ->
     ensure_statistics_enabled(),
-    gen_event:add_sup_handler(rabbit_event, ?MODULE, []).
+    gen_event:add_handler(rabbit_event, ?MODULE, []).
 
 gc() ->
     erlang:garbage_collect(whereis(rabbit_event)).
 
+%% Some users have reasons to run with only the agent enabled:
+%% make it possible for them to configure key management app
+%% settings such as rates_mode.
+get_management_env(Key) ->
+    rabbit_misc:get_env(
+      rabbitmq_management, Key,
+      rabbit_misc:get_env(rabbitmq_management_agent, Key, undefined)).
+
+rates_mode() ->
+    case get_management_env(rates_mode) of
+        undefined -> basic;
+        Mode      -> Mode
+    end.
+
+handle_force_fine_statistics() ->
+    case get_management_env(force_fine_statistics) of
+        undefined ->
+            ok;
+        X ->
+            rabbit_log:warning(
+              "force_fine_statistics set to ~p; ignored.~n"
+              "Replaced by {rates_mode, none} in the rabbitmq_management "
+              "application.~n", [X])
+    end.
+
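Because rates_mode is now read from either the rabbitmq_management or the rabbitmq_management_agent application environment, agent-only nodes can configure it themselves. A sketch of the corresponding rabbitmq.config entry (the value shown is just one possibility):

    [
      {rabbitmq_management_agent, [{rates_mode, none}]}
    ].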
 %%----------------------------------------------------------------------------
 
 ensure_statistics_enabled() ->
-    {ok, ForceStats} = application:get_env(rabbitmq_management_agent,
-                                           force_fine_statistics),
+    ForceStats = rates_mode() =/= none,
+    handle_force_fine_statistics(),
     {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
+    rabbit_log:info("Management plugin: using rates mode '~p'~n", [rates_mode()]),
     case {ForceStats, StatsLevel} of
         {true,  fine} ->
             ok;
index 805e44ee8dc09aaefbc7f831a11aea2b09a2c2c2..62f783bfdf41a39ad309b955bb819a609726f331 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_external_stats).
 
 -export([list_registry_plugins/1]).
 
+-import(rabbit_misc, [pget/2]).
+
 -include_lib("rabbit_common/include/rabbit.hrl").
 
 -define(REFRESH_RATIO, 5000).
 -define(KEYS, [name, partitions, os_pid, fd_used, fd_total,
                sockets_used, sockets_total, mem_used, mem_limit, mem_alarm,
                disk_free_limit, disk_free, disk_free_alarm,
-               proc_used, proc_total, statistics_level,
+               proc_used, proc_total, rates_mode,
                uptime, run_queue, processors, exchange_types,
-               auth_mechanisms, applications, contexts]).
+               auth_mechanisms, applications, contexts,
+               log_file, sasl_log_file, db_dir, config_files, net_ticktime,
+               enabled_plugins, persister_stats]).
 
 %%--------------------------------------------------------------------
 
--record(state, {fd_total}).
+-record(state, {fd_total, fhc_stats, fhc_stats_derived, node_owners}).
 
 %%--------------------------------------------------------------------
 
@@ -172,23 +176,34 @@ i(disk_free_limit, _State) -> get_disk_free_limit();
 i(disk_free,       _State) -> get_disk_free();
 i(disk_free_alarm, _State) -> resource_alarm_set(disk);
 i(contexts,        _State) -> rabbit_web_dispatch_contexts();
-i(uptime, _State) ->
-    {Total, _} = erlang:statistics(wall_clock),
-    Total;
-i(statistics_level, _State) ->
-    {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
-    StatsLevel;
-i(exchange_types, _State) ->
-    list_registry_plugins(exchange);
+i(uptime,          _State) -> {Total, _} = erlang:statistics(wall_clock),
+                                Total;
+i(rates_mode,      _State) -> rabbit_mgmt_db_handler:rates_mode();
+i(exchange_types,  _State) -> list_registry_plugins(exchange);
+i(log_file,        _State) -> log_location(kernel);
+i(sasl_log_file,   _State) -> log_location(sasl);
+i(db_dir,          _State) -> list_to_binary(rabbit_mnesia:dir());
+i(config_files,    _State) -> [list_to_binary(F) || F <- rabbit:config_files()];
+i(net_ticktime,    _State) -> net_kernel:get_net_ticktime();
+i(persister_stats,  State) -> persister_stats(State);
+i(enabled_plugins, _State) -> {ok, Dir} = application:get_env(
+                                           rabbit, enabled_plugins_file),
+                              rabbit_plugins:read_enabled(Dir);
 i(auth_mechanisms, _State) ->
     {ok, Mechanisms} = application:get_env(rabbit, auth_mechanisms),
     list_registry_plugins(
       auth_mechanism,
       fun (N) -> lists:member(list_to_atom(binary_to_list(N)), Mechanisms) end);
-i(applications, _State) ->
+i(applications,    _State) ->
     [format_application(A) ||
         A <- lists:keysort(1, rabbit_misc:which_applications())].
 
+log_location(Type) ->
+    case rabbit:log_location(Type) of
+        tty  -> <<"tty">>;
+        File -> list_to_binary(File)
+    end.
+
 resource_alarm_set(Source) ->
     lists:member({{resource_limit, Source, node()},[]},
                  rabbit_alarm:get_alarms()).
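
The node object emitted by this module now carries rates_mode, net_ticktime, log and db paths, enabled plugins and persister stats, so these values become visible per node through the management HTTP API. A minimal sketch of reading them, assuming a local broker with the management plugin on its default port 15672 and the default guest/guest credentials (both assumptions, adjust for your setup):

    # Sketch: read the new per-node fields via the management HTTP API.
    import base64, json, urllib.request

    req = urllib.request.Request("http://localhost:15672/api/nodes")
    token = base64.b64encode(b"guest:guest").decode("ascii")
    req.add_header("Authorization", "Basic " + token)
    for node in json.load(urllib.request.urlopen(req)):
        # rates_mode comes from rabbit_mgmt_db_handler:rates_mode/0 above
        print(node["name"], node.get("rates_mode"), node.get("net_ticktime"))
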
@@ -212,6 +227,51 @@ set_plugin_name(Name, Module) ->
     [{name, list_to_binary(atom_to_list(Name))} |
      proplists:delete(name, Module:description())].
 
+persister_stats(#state{fhc_stats         = FHC,
+                       fhc_stats_derived = FHCD}) ->
+    [{flatten_key(K), V} || {{_Op, Type} = K, V} <- FHC,
+                            Type =/= time] ++
+        [{flatten_key(K), V} || {K, V} <- FHCD].
+
+flatten_key({A, B}) ->
+    list_to_atom(atom_to_list(A) ++ "_" ++ atom_to_list(B)).
+
+cluster_links() ->
+    {ok, Items} = net_kernel:nodes_info(),
+    [Link || Item <- Items,
+             Link <- [format_nodes_info(Item)], Link =/= undefined].
+
+format_nodes_info({Node, Info}) ->
+    Owner = proplists:get_value(owner, Info),
+    case catch process_info(Owner, links) of
+        {links, Links} ->
+            case [Link || Link <- Links, is_port(Link)] of
+                [Port] ->
+                    {Node, Owner, format_nodes_info1(Port)};
+                _ ->
+                    undefined
+            end;
+        _ ->
+            undefined
+    end.
+
+format_nodes_info1(Port) ->
+    case {rabbit_net:socket_ends(Port, inbound),
+          rabbit_net:getstat(Port, [recv_oct, send_oct])} of
+        {{ok, {PeerAddr, PeerPort, SockAddr, SockPort}}, {ok, Stats}} ->
+            [{peer_addr, maybe_ntoab(PeerAddr)},
+             {peer_port, PeerPort},
+             {sock_addr, maybe_ntoab(SockAddr)},
+             {sock_port, SockPort},
+             {recv_bytes, pget(recv_oct, Stats)},
+             {send_bytes, pget(send_oct, Stats)}];
+        _ ->
+            []
+    end.
+
+maybe_ntoab(A) when is_tuple(A) -> list_to_binary(rabbit_misc:ntoab(A));
+maybe_ntoab(H)                  -> H.
+
 %%--------------------------------------------------------------------
 
 %% This is slightly icky in that we introduce knowledge of
@@ -244,17 +304,18 @@ format_mochiweb_option_list(C) ->
 
 format_mochiweb_option(ssl_opts, V) ->
     format_mochiweb_option_list(V);
-format_mochiweb_option(ciphers, V) ->
-    list_to_binary(rabbit_misc:format("~w", [V]));
-format_mochiweb_option(_K, V) when is_list(V) ->
-    list_to_binary(V);
 format_mochiweb_option(_K, V) ->
-    V.
+    case io_lib:printable_list(V) of
+        true  -> list_to_binary(V);
+        false -> list_to_binary(rabbit_misc:format("~w", [V]))
+    end.
 
 %%--------------------------------------------------------------------
 
 init([]) ->
-    State = #state{fd_total = file_handle_cache:ulimit()},
+    State = #state{fd_total    = file_handle_cache:ulimit(),
+                   fhc_stats   = file_handle_cache_stats:get(),
+                   node_owners = sets:new()},
     %% If we emit an update straight away we will do so just before
     %% the mgmt db starts up - and then have to wait ?REFRESH_RATIO
     %% until we send another. So let's have a shorter wait in the hope
@@ -282,7 +343,39 @@ code_change(_, State, _) -> {ok, State}.
 
 %%--------------------------------------------------------------------
 
-emit_update(State) ->
+emit_update(State0) ->
+    State = update_state(State0),
     rabbit_event:notify(node_stats, infos(?KEYS, State)),
     erlang:send_after(?REFRESH_RATIO, self(), emit_update),
-    State.
+    emit_node_node_stats(State).
+
+emit_node_node_stats(State = #state{node_owners = Owners}) ->
+    Links = cluster_links(),
+    NewOwners = sets:from_list([{Node, Owner} || {Node, Owner, _} <- Links]),
+    Dead = sets:to_list(sets:subtract(Owners, NewOwners)),
+    [rabbit_event:notify(
+       node_node_deleted, [{route, Route}]) || {Node, _Owner} <- Dead,
+                                               Route <- [{node(), Node},
+                                                         {Node,   node()}]],
+    [rabbit_event:notify(
+       node_node_stats, [{route, {node(), Node}} | Stats]) ||
+        {Node, _Owner, Stats} <- Links],
+    State#state{node_owners = NewOwners}.
+
+update_state(State0 = #state{fhc_stats = FHC0}) ->
+    FHC = file_handle_cache_stats:get(),
+    Avgs = [{{Op, avg_time}, avg_op_time(Op, V, FHC, FHC0)}
+            || {{Op, time}, V} <- FHC],
+    State0#state{fhc_stats         = FHC,
+                 fhc_stats_derived = Avgs}.
+
+-define(MICRO_TO_MILLI, 1000).
+
+avg_op_time(Op, Time, FHC, FHC0) ->
+    Time0 = pget({Op, time}, FHC0),
+    TimeDelta = Time - Time0,
+    OpDelta = pget({Op, count}, FHC) - pget({Op, count}, FHC0),
+    case OpDelta of
+        0 -> 0;
+        _ -> (TimeDelta / OpDelta) / ?MICRO_TO_MILLI
+    end.
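
avg_op_time/4 turns two successive file_handle_cache_stats samples into an average latency per operation: the accumulated time delta (in microseconds) is divided by the operation-count delta and then by 1000 to yield milliseconds. The same calculation as a small sketch, with made-up sample values:

    # Sketch of the average-time-per-op calculation (microseconds -> milliseconds).
    MICRO_TO_MILLI = 1000

    def avg_op_time(time_now, time_prev, count_now, count_prev):
        ops = count_now - count_prev
        if ops == 0:
            return 0
        return ((time_now - time_prev) / ops) / MICRO_TO_MILLI

    # e.g. 150000 us spent on 50 additional reads since the previous sample:
    print(avg_op_time(650000, 500000, 1050, 1000))  # -> 3.0 ms per read
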
index e94c97b06566262c4455bc4ed5b634bb5f337e41..bd4b8261f7a22fc4ee56d1f153ac6533d399042a 100644 (file)
@@ -4,5 +4,5 @@
   {modules, []},
   {registered, []},
   {mod, {rabbit_mgmt_agent_app, []}},
-  {env, [{force_fine_statistics, true}]},
+  {env, []},
   {applications, [kernel, stdlib, rabbit]}]}.

diff --git a/rabbitmq-server/plugins-src/rabbitmq-management-visualiser/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-management-visualiser/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 4e4a830d10a717a0de6796922ccbae32c3b9cf05..8411d430b8204d4f8097f5353cfba553e7dda6c8 100644 (file)
@@ -1,32 +1,32 @@
-// glMatrix v0.9.5
-glMatrixArrayType=typeof Float32Array!="undefined"?Float32Array:typeof WebGLFloatArray!="undefined"?WebGLFloatArray:Array;var vec3={};vec3.create=function(a){var b=new glMatrixArrayType(3);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2]}return b};vec3.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];return b};vec3.add=function(a,b,c){if(!c||a==c){a[0]+=b[0];a[1]+=b[1];a[2]+=b[2];return a}c[0]=a[0]+b[0];c[1]=a[1]+b[1];c[2]=a[2]+b[2];return c};
-vec3.subtract=function(a,b,c){if(!c||a==c){a[0]-=b[0];a[1]-=b[1];a[2]-=b[2];return a}c[0]=a[0]-b[0];c[1]=a[1]-b[1];c[2]=a[2]-b[2];return c};vec3.negate=function(a,b){b||(b=a);b[0]=-a[0];b[1]=-a[1];b[2]=-a[2];return b};vec3.scale=function(a,b,c){if(!c||a==c){a[0]*=b;a[1]*=b;a[2]*=b;return a}c[0]=a[0]*b;c[1]=a[1]*b;c[2]=a[2]*b;return c};
-vec3.normalize=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=Math.sqrt(c*c+d*d+e*e);if(g){if(g==1){b[0]=c;b[1]=d;b[2]=e;return b}}else{b[0]=0;b[1]=0;b[2]=0;return b}g=1/g;b[0]=c*g;b[1]=d*g;b[2]=e*g;return b};vec3.cross=function(a,b,c){c||(c=a);var d=a[0],e=a[1];a=a[2];var g=b[0],f=b[1];b=b[2];c[0]=e*b-a*f;c[1]=a*g-d*b;c[2]=d*f-e*g;return c};vec3.length=function(a){var b=a[0],c=a[1];a=a[2];return Math.sqrt(b*b+c*c+a*a)};vec3.dot=function(a,b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]};
-vec3.direction=function(a,b,c){c||(c=a);var d=a[0]-b[0],e=a[1]-b[1];a=a[2]-b[2];b=Math.sqrt(d*d+e*e+a*a);if(!b){c[0]=0;c[1]=0;c[2]=0;return c}b=1/b;c[0]=d*b;c[1]=e*b;c[2]=a*b;return c};vec3.lerp=function(a,b,c,d){d||(d=a);d[0]=a[0]+c*(b[0]-a[0]);d[1]=a[1]+c*(b[1]-a[1]);d[2]=a[2]+c*(b[2]-a[2]);return d};vec3.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+"]"};var mat3={};
-mat3.create=function(a){var b=new glMatrixArrayType(9);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9]}return b};mat3.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];return b};mat3.identity=function(a){a[0]=1;a[1]=0;a[2]=0;a[3]=0;a[4]=1;a[5]=0;a[6]=0;a[7]=0;a[8]=1;return a};
-mat3.transpose=function(a,b){if(!b||a==b){var c=a[1],d=a[2],e=a[5];a[1]=a[3];a[2]=a[6];a[3]=c;a[5]=a[7];a[6]=d;a[7]=e;return a}b[0]=a[0];b[1]=a[3];b[2]=a[6];b[3]=a[1];b[4]=a[4];b[5]=a[7];b[6]=a[2];b[7]=a[5];b[8]=a[8];return b};mat3.toMat4=function(a,b){b||(b=mat4.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=0;b[4]=a[3];b[5]=a[4];b[6]=a[5];b[7]=0;b[8]=a[6];b[9]=a[7];b[10]=a[8];b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};
-mat3.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+", "+a[4]+", "+a[5]+", "+a[6]+", "+a[7]+", "+a[8]+"]"};var mat4={};mat4.create=function(a){var b=new glMatrixArrayType(16);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=a[12];b[13]=a[13];b[14]=a[14];b[15]=a[15]}return b};
-mat4.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=a[12];b[13]=a[13];b[14]=a[14];b[15]=a[15];return b};mat4.identity=function(a){a[0]=1;a[1]=0;a[2]=0;a[3]=0;a[4]=0;a[5]=1;a[6]=0;a[7]=0;a[8]=0;a[9]=0;a[10]=1;a[11]=0;a[12]=0;a[13]=0;a[14]=0;a[15]=1;return a};
-mat4.transpose=function(a,b){if(!b||a==b){var c=a[1],d=a[2],e=a[3],g=a[6],f=a[7],h=a[11];a[1]=a[4];a[2]=a[8];a[3]=a[12];a[4]=c;a[6]=a[9];a[7]=a[13];a[8]=d;a[9]=g;a[11]=a[14];a[12]=e;a[13]=f;a[14]=h;return a}b[0]=a[0];b[1]=a[4];b[2]=a[8];b[3]=a[12];b[4]=a[1];b[5]=a[5];b[6]=a[9];b[7]=a[13];b[8]=a[2];b[9]=a[6];b[10]=a[10];b[11]=a[14];b[12]=a[3];b[13]=a[7];b[14]=a[11];b[15]=a[15];return b};
-mat4.determinant=function(a){var b=a[0],c=a[1],d=a[2],e=a[3],g=a[4],f=a[5],h=a[6],i=a[7],j=a[8],k=a[9],l=a[10],o=a[11],m=a[12],n=a[13],p=a[14];a=a[15];return m*k*h*e-j*n*h*e-m*f*l*e+g*n*l*e+j*f*p*e-g*k*p*e-m*k*d*i+j*n*d*i+m*c*l*i-b*n*l*i-j*c*p*i+b*k*p*i+m*f*d*o-g*n*d*o-m*c*h*o+b*n*h*o+g*c*p*o-b*f*p*o-j*f*d*a+g*k*d*a+j*c*h*a-b*k*h*a-g*c*l*a+b*f*l*a};
-mat4.inverse=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=a[3],f=a[4],h=a[5],i=a[6],j=a[7],k=a[8],l=a[9],o=a[10],m=a[11],n=a[12],p=a[13],r=a[14],s=a[15],A=c*h-d*f,B=c*i-e*f,t=c*j-g*f,u=d*i-e*h,v=d*j-g*h,w=e*j-g*i,x=k*p-l*n,y=k*r-o*n,z=k*s-m*n,C=l*r-o*p,D=l*s-m*p,E=o*s-m*r,q=1/(A*E-B*D+t*C+u*z-v*y+w*x);b[0]=(h*E-i*D+j*C)*q;b[1]=(-d*E+e*D-g*C)*q;b[2]=(p*w-r*v+s*u)*q;b[3]=(-l*w+o*v-m*u)*q;b[4]=(-f*E+i*z-j*y)*q;b[5]=(c*E-e*z+g*y)*q;b[6]=(-n*w+r*t-s*B)*q;b[7]=(k*w-o*t+m*B)*q;b[8]=(f*D-h*z+j*x)*q;
-b[9]=(-c*D+d*z-g*x)*q;b[10]=(n*v-p*t+s*A)*q;b[11]=(-k*v+l*t-m*A)*q;b[12]=(-f*C+h*y-i*x)*q;b[13]=(c*C-d*y+e*x)*q;b[14]=(-n*u+p*B-r*A)*q;b[15]=(k*u-l*B+o*A)*q;return b};mat4.toRotationMat=function(a,b){b||(b=mat4.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};
-mat4.toMat3=function(a,b){b||(b=mat3.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[4];b[4]=a[5];b[5]=a[6];b[6]=a[8];b[7]=a[9];b[8]=a[10];return b};mat4.toInverseMat3=function(a,b){var c=a[0],d=a[1],e=a[2],g=a[4],f=a[5],h=a[6],i=a[8],j=a[9],k=a[10],l=k*f-h*j,o=-k*g+h*i,m=j*g-f*i,n=c*l+d*o+e*m;if(!n)return null;n=1/n;b||(b=mat3.create());b[0]=l*n;b[1]=(-k*d+e*j)*n;b[2]=(h*d-e*f)*n;b[3]=o*n;b[4]=(k*c-e*i)*n;b[5]=(-h*c+e*g)*n;b[6]=m*n;b[7]=(-j*c+d*i)*n;b[8]=(f*c-d*g)*n;return b};
-mat4.multiply=function(a,b,c){c||(c=a);var d=a[0],e=a[1],g=a[2],f=a[3],h=a[4],i=a[5],j=a[6],k=a[7],l=a[8],o=a[9],m=a[10],n=a[11],p=a[12],r=a[13],s=a[14];a=a[15];var A=b[0],B=b[1],t=b[2],u=b[3],v=b[4],w=b[5],x=b[6],y=b[7],z=b[8],C=b[9],D=b[10],E=b[11],q=b[12],F=b[13],G=b[14];b=b[15];c[0]=A*d+B*h+t*l+u*p;c[1]=A*e+B*i+t*o+u*r;c[2]=A*g+B*j+t*m+u*s;c[3]=A*f+B*k+t*n+u*a;c[4]=v*d+w*h+x*l+y*p;c[5]=v*e+w*i+x*o+y*r;c[6]=v*g+w*j+x*m+y*s;c[7]=v*f+w*k+x*n+y*a;c[8]=z*d+C*h+D*l+E*p;c[9]=z*e+C*i+D*o+E*r;c[10]=z*
-g+C*j+D*m+E*s;c[11]=z*f+C*k+D*n+E*a;c[12]=q*d+F*h+G*l+b*p;c[13]=q*e+F*i+G*o+b*r;c[14]=q*g+F*j+G*m+b*s;c[15]=q*f+F*k+G*n+b*a;return c};mat4.multiplyVec3=function(a,b,c){c||(c=b);var d=b[0],e=b[1];b=b[2];c[0]=a[0]*d+a[4]*e+a[8]*b+a[12];c[1]=a[1]*d+a[5]*e+a[9]*b+a[13];c[2]=a[2]*d+a[6]*e+a[10]*b+a[14];return c};
-mat4.multiplyVec4=function(a,b,c){c||(c=b);var d=b[0],e=b[1],g=b[2];b=b[3];c[0]=a[0]*d+a[4]*e+a[8]*g+a[12]*b;c[1]=a[1]*d+a[5]*e+a[9]*g+a[13]*b;c[2]=a[2]*d+a[6]*e+a[10]*g+a[14]*b;c[3]=a[3]*d+a[7]*e+a[11]*g+a[15]*b;return c};
-mat4.translate=function(a,b,c){var d=b[0],e=b[1];b=b[2];if(!c||a==c){a[12]=a[0]*d+a[4]*e+a[8]*b+a[12];a[13]=a[1]*d+a[5]*e+a[9]*b+a[13];a[14]=a[2]*d+a[6]*e+a[10]*b+a[14];a[15]=a[3]*d+a[7]*e+a[11]*b+a[15];return a}var g=a[0],f=a[1],h=a[2],i=a[3],j=a[4],k=a[5],l=a[6],o=a[7],m=a[8],n=a[9],p=a[10],r=a[11];c[0]=g;c[1]=f;c[2]=h;c[3]=i;c[4]=j;c[5]=k;c[6]=l;c[7]=o;c[8]=m;c[9]=n;c[10]=p;c[11]=r;c[12]=g*d+j*e+m*b+a[12];c[13]=f*d+k*e+n*b+a[13];c[14]=h*d+l*e+p*b+a[14];c[15]=i*d+o*e+r*b+a[15];return c};
-mat4.scale=function(a,b,c){var d=b[0],e=b[1];b=b[2];if(!c||a==c){a[0]*=d;a[1]*=d;a[2]*=d;a[3]*=d;a[4]*=e;a[5]*=e;a[6]*=e;a[7]*=e;a[8]*=b;a[9]*=b;a[10]*=b;a[11]*=b;return a}c[0]=a[0]*d;c[1]=a[1]*d;c[2]=a[2]*d;c[3]=a[3]*d;c[4]=a[4]*e;c[5]=a[5]*e;c[6]=a[6]*e;c[7]=a[7]*e;c[8]=a[8]*b;c[9]=a[9]*b;c[10]=a[10]*b;c[11]=a[11]*b;c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15];return c};
-mat4.rotate=function(a,b,c,d){var e=c[0],g=c[1];c=c[2];var f=Math.sqrt(e*e+g*g+c*c);if(!f)return null;if(f!=1){f=1/f;e*=f;g*=f;c*=f}var h=Math.sin(b),i=Math.cos(b),j=1-i;b=a[0];f=a[1];var k=a[2],l=a[3],o=a[4],m=a[5],n=a[6],p=a[7],r=a[8],s=a[9],A=a[10],B=a[11],t=e*e*j+i,u=g*e*j+c*h,v=c*e*j-g*h,w=e*g*j-c*h,x=g*g*j+i,y=c*g*j+e*h,z=e*c*j+g*h;e=g*c*j-e*h;g=c*c*j+i;if(d){if(a!=d){d[12]=a[12];d[13]=a[13];d[14]=a[14];d[15]=a[15]}}else d=a;d[0]=b*t+o*u+r*v;d[1]=f*t+m*u+s*v;d[2]=k*t+n*u+A*v;d[3]=l*t+p*u+B*
-v;d[4]=b*w+o*x+r*y;d[5]=f*w+m*x+s*y;d[6]=k*w+n*x+A*y;d[7]=l*w+p*x+B*y;d[8]=b*z+o*e+r*g;d[9]=f*z+m*e+s*g;d[10]=k*z+n*e+A*g;d[11]=l*z+p*e+B*g;return d};mat4.rotateX=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[4],g=a[5],f=a[6],h=a[7],i=a[8],j=a[9],k=a[10],l=a[11];if(c){if(a!=c){c[0]=a[0];c[1]=a[1];c[2]=a[2];c[3]=a[3];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[4]=e*b+i*d;c[5]=g*b+j*d;c[6]=f*b+k*d;c[7]=h*b+l*d;c[8]=e*-d+i*b;c[9]=g*-d+j*b;c[10]=f*-d+k*b;c[11]=h*-d+l*b;return c};
-mat4.rotateY=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[0],g=a[1],f=a[2],h=a[3],i=a[8],j=a[9],k=a[10],l=a[11];if(c){if(a!=c){c[4]=a[4];c[5]=a[5];c[6]=a[6];c[7]=a[7];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[0]=e*b+i*-d;c[1]=g*b+j*-d;c[2]=f*b+k*-d;c[3]=h*b+l*-d;c[8]=e*d+i*b;c[9]=g*d+j*b;c[10]=f*d+k*b;c[11]=h*d+l*b;return c};
-mat4.rotateZ=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[0],g=a[1],f=a[2],h=a[3],i=a[4],j=a[5],k=a[6],l=a[7];if(c){if(a!=c){c[8]=a[8];c[9]=a[9];c[10]=a[10];c[11]=a[11];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[0]=e*b+i*d;c[1]=g*b+j*d;c[2]=f*b+k*d;c[3]=h*b+l*d;c[4]=e*-d+i*b;c[5]=g*-d+j*b;c[6]=f*-d+k*b;c[7]=h*-d+l*b;return c};
-mat4.frustum=function(a,b,c,d,e,g,f){f||(f=mat4.create());var h=b-a,i=d-c,j=g-e;f[0]=e*2/h;f[1]=0;f[2]=0;f[3]=0;f[4]=0;f[5]=e*2/i;f[6]=0;f[7]=0;f[8]=(b+a)/h;f[9]=(d+c)/i;f[10]=-(g+e)/j;f[11]=-1;f[12]=0;f[13]=0;f[14]=-(g*e*2)/j;f[15]=0;return f};mat4.perspective=function(a,b,c,d,e){a=c*Math.tan(a*Math.PI/360);b=a*b;return mat4.frustum(-b,b,-a,a,c,d,e)};
-mat4.ortho=function(a,b,c,d,e,g,f){f||(f=mat4.create());var h=b-a,i=d-c,j=g-e;f[0]=2/h;f[1]=0;f[2]=0;f[3]=0;f[4]=0;f[5]=2/i;f[6]=0;f[7]=0;f[8]=0;f[9]=0;f[10]=-2/j;f[11]=0;f[12]=-(a+b)/h;f[13]=-(d+c)/i;f[14]=-(g+e)/j;f[15]=1;return f};
-mat4.lookAt=function(a,b,c,d){d||(d=mat4.create());var e=a[0],g=a[1];a=a[2];var f=c[0],h=c[1],i=c[2];c=b[1];var j=b[2];if(e==b[0]&&g==c&&a==j)return mat4.identity(d);var k,l,o,m;c=e-b[0];j=g-b[1];b=a-b[2];m=1/Math.sqrt(c*c+j*j+b*b);c*=m;j*=m;b*=m;k=h*b-i*j;i=i*c-f*b;f=f*j-h*c;if(m=Math.sqrt(k*k+i*i+f*f)){m=1/m;k*=m;i*=m;f*=m}else f=i=k=0;h=j*f-b*i;l=b*k-c*f;o=c*i-j*k;if(m=Math.sqrt(h*h+l*l+o*o)){m=1/m;h*=m;l*=m;o*=m}else o=l=h=0;d[0]=k;d[1]=h;d[2]=c;d[3]=0;d[4]=i;d[5]=l;d[6]=j;d[7]=0;d[8]=f;d[9]=
-o;d[10]=b;d[11]=0;d[12]=-(k*e+i*g+f*a);d[13]=-(h*e+l*g+o*a);d[14]=-(c*e+j*g+b*a);d[15]=1;return d};mat4.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+", "+a[4]+", "+a[5]+", "+a[6]+", "+a[7]+", "+a[8]+", "+a[9]+", "+a[10]+", "+a[11]+", "+a[12]+", "+a[13]+", "+a[14]+", "+a[15]+"]"};quat4={};quat4.create=function(a){var b=new glMatrixArrayType(4);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3]}return b};quat4.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];return b};
-quat4.calculateW=function(a,b){var c=a[0],d=a[1],e=a[2];if(!b||a==b){a[3]=-Math.sqrt(Math.abs(1-c*c-d*d-e*e));return a}b[0]=c;b[1]=d;b[2]=e;b[3]=-Math.sqrt(Math.abs(1-c*c-d*d-e*e));return b};quat4.inverse=function(a,b){if(!b||a==b){a[0]*=1;a[1]*=1;a[2]*=1;return a}b[0]=-a[0];b[1]=-a[1];b[2]=-a[2];b[3]=a[3];return b};quat4.length=function(a){var b=a[0],c=a[1],d=a[2];a=a[3];return Math.sqrt(b*b+c*c+d*d+a*a)};
-quat4.normalize=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=a[3],f=Math.sqrt(c*c+d*d+e*e+g*g);if(f==0){b[0]=0;b[1]=0;b[2]=0;b[3]=0;return b}f=1/f;b[0]=c*f;b[1]=d*f;b[2]=e*f;b[3]=g*f;return b};quat4.multiply=function(a,b,c){c||(c=a);var d=a[0],e=a[1],g=a[2];a=a[3];var f=b[0],h=b[1],i=b[2];b=b[3];c[0]=d*b+a*f+e*i-g*h;c[1]=e*b+a*h+g*f-d*i;c[2]=g*b+a*i+d*h-e*f;c[3]=a*b-d*f-e*h-g*i;return c};
-quat4.multiplyVec3=function(a,b,c){c||(c=b);var d=b[0],e=b[1],g=b[2];b=a[0];var f=a[1],h=a[2];a=a[3];var i=a*d+f*g-h*e,j=a*e+h*d-b*g,k=a*g+b*e-f*d;d=-b*d-f*e-h*g;c[0]=i*a+d*-b+j*-h-k*-f;c[1]=j*a+d*-f+k*-b-i*-h;c[2]=k*a+d*-h+i*-f-j*-b;return c};quat4.toMat3=function(a,b){b||(b=mat3.create());var c=a[0],d=a[1],e=a[2],g=a[3],f=c+c,h=d+d,i=e+e,j=c*f,k=c*h;c=c*i;var l=d*h;d=d*i;e=e*i;f=g*f;h=g*h;g=g*i;b[0]=1-(l+e);b[1]=k-g;b[2]=c+h;b[3]=k+g;b[4]=1-(j+e);b[5]=d-f;b[6]=c-h;b[7]=d+f;b[8]=1-(j+l);return b};
-quat4.toMat4=function(a,b){b||(b=mat4.create());var c=a[0],d=a[1],e=a[2],g=a[3],f=c+c,h=d+d,i=e+e,j=c*f,k=c*h;c=c*i;var l=d*h;d=d*i;e=e*i;f=g*f;h=g*h;g=g*i;b[0]=1-(l+e);b[1]=k-g;b[2]=c+h;b[3]=0;b[4]=k+g;b[5]=1-(j+e);b[6]=d-f;b[7]=0;b[8]=c-h;b[9]=d+f;b[10]=1-(j+l);b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};quat4.slerp=function(a,b,c,d){d||(d=a);var e=c;if(a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]<0)e=-1*c;d[0]=1-c*a[0]+e*b[0];d[1]=1-c*a[1]+e*b[1];d[2]=1-c*a[2]+e*b[2];d[3]=1-c*a[3]+e*b[3];return d};
-quat4.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+"]"};
+// glMatrix v0.9.5\r
+glMatrixArrayType=typeof Float32Array!="undefined"?Float32Array:typeof WebGLFloatArray!="undefined"?WebGLFloatArray:Array;var vec3={};vec3.create=function(a){var b=new glMatrixArrayType(3);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2]}return b};vec3.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];return b};vec3.add=function(a,b,c){if(!c||a==c){a[0]+=b[0];a[1]+=b[1];a[2]+=b[2];return a}c[0]=a[0]+b[0];c[1]=a[1]+b[1];c[2]=a[2]+b[2];return c};\r
+vec3.subtract=function(a,b,c){if(!c||a==c){a[0]-=b[0];a[1]-=b[1];a[2]-=b[2];return a}c[0]=a[0]-b[0];c[1]=a[1]-b[1];c[2]=a[2]-b[2];return c};vec3.negate=function(a,b){b||(b=a);b[0]=-a[0];b[1]=-a[1];b[2]=-a[2];return b};vec3.scale=function(a,b,c){if(!c||a==c){a[0]*=b;a[1]*=b;a[2]*=b;return a}c[0]=a[0]*b;c[1]=a[1]*b;c[2]=a[2]*b;return c};\r
+vec3.normalize=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=Math.sqrt(c*c+d*d+e*e);if(g){if(g==1){b[0]=c;b[1]=d;b[2]=e;return b}}else{b[0]=0;b[1]=0;b[2]=0;return b}g=1/g;b[0]=c*g;b[1]=d*g;b[2]=e*g;return b};vec3.cross=function(a,b,c){c||(c=a);var d=a[0],e=a[1];a=a[2];var g=b[0],f=b[1];b=b[2];c[0]=e*b-a*f;c[1]=a*g-d*b;c[2]=d*f-e*g;return c};vec3.length=function(a){var b=a[0],c=a[1];a=a[2];return Math.sqrt(b*b+c*c+a*a)};vec3.dot=function(a,b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]};\r
+vec3.direction=function(a,b,c){c||(c=a);var d=a[0]-b[0],e=a[1]-b[1];a=a[2]-b[2];b=Math.sqrt(d*d+e*e+a*a);if(!b){c[0]=0;c[1]=0;c[2]=0;return c}b=1/b;c[0]=d*b;c[1]=e*b;c[2]=a*b;return c};vec3.lerp=function(a,b,c,d){d||(d=a);d[0]=a[0]+c*(b[0]-a[0]);d[1]=a[1]+c*(b[1]-a[1]);d[2]=a[2]+c*(b[2]-a[2]);return d};vec3.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+"]"};var mat3={};\r
+mat3.create=function(a){var b=new glMatrixArrayType(9);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9]}return b};mat3.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];return b};mat3.identity=function(a){a[0]=1;a[1]=0;a[2]=0;a[3]=0;a[4]=1;a[5]=0;a[6]=0;a[7]=0;a[8]=1;return a};\r
+mat3.transpose=function(a,b){if(!b||a==b){var c=a[1],d=a[2],e=a[5];a[1]=a[3];a[2]=a[6];a[3]=c;a[5]=a[7];a[6]=d;a[7]=e;return a}b[0]=a[0];b[1]=a[3];b[2]=a[6];b[3]=a[1];b[4]=a[4];b[5]=a[7];b[6]=a[2];b[7]=a[5];b[8]=a[8];return b};mat3.toMat4=function(a,b){b||(b=mat4.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=0;b[4]=a[3];b[5]=a[4];b[6]=a[5];b[7]=0;b[8]=a[6];b[9]=a[7];b[10]=a[8];b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};\r
+mat3.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+", "+a[4]+", "+a[5]+", "+a[6]+", "+a[7]+", "+a[8]+"]"};var mat4={};mat4.create=function(a){var b=new glMatrixArrayType(16);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=a[12];b[13]=a[13];b[14]=a[14];b[15]=a[15]}return b};\r
+mat4.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=a[12];b[13]=a[13];b[14]=a[14];b[15]=a[15];return b};mat4.identity=function(a){a[0]=1;a[1]=0;a[2]=0;a[3]=0;a[4]=0;a[5]=1;a[6]=0;a[7]=0;a[8]=0;a[9]=0;a[10]=1;a[11]=0;a[12]=0;a[13]=0;a[14]=0;a[15]=1;return a};\r
+mat4.transpose=function(a,b){if(!b||a==b){var c=a[1],d=a[2],e=a[3],g=a[6],f=a[7],h=a[11];a[1]=a[4];a[2]=a[8];a[3]=a[12];a[4]=c;a[6]=a[9];a[7]=a[13];a[8]=d;a[9]=g;a[11]=a[14];a[12]=e;a[13]=f;a[14]=h;return a}b[0]=a[0];b[1]=a[4];b[2]=a[8];b[3]=a[12];b[4]=a[1];b[5]=a[5];b[6]=a[9];b[7]=a[13];b[8]=a[2];b[9]=a[6];b[10]=a[10];b[11]=a[14];b[12]=a[3];b[13]=a[7];b[14]=a[11];b[15]=a[15];return b};\r
+mat4.determinant=function(a){var b=a[0],c=a[1],d=a[2],e=a[3],g=a[4],f=a[5],h=a[6],i=a[7],j=a[8],k=a[9],l=a[10],o=a[11],m=a[12],n=a[13],p=a[14];a=a[15];return m*k*h*e-j*n*h*e-m*f*l*e+g*n*l*e+j*f*p*e-g*k*p*e-m*k*d*i+j*n*d*i+m*c*l*i-b*n*l*i-j*c*p*i+b*k*p*i+m*f*d*o-g*n*d*o-m*c*h*o+b*n*h*o+g*c*p*o-b*f*p*o-j*f*d*a+g*k*d*a+j*c*h*a-b*k*h*a-g*c*l*a+b*f*l*a};\r
+mat4.inverse=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=a[3],f=a[4],h=a[5],i=a[6],j=a[7],k=a[8],l=a[9],o=a[10],m=a[11],n=a[12],p=a[13],r=a[14],s=a[15],A=c*h-d*f,B=c*i-e*f,t=c*j-g*f,u=d*i-e*h,v=d*j-g*h,w=e*j-g*i,x=k*p-l*n,y=k*r-o*n,z=k*s-m*n,C=l*r-o*p,D=l*s-m*p,E=o*s-m*r,q=1/(A*E-B*D+t*C+u*z-v*y+w*x);b[0]=(h*E-i*D+j*C)*q;b[1]=(-d*E+e*D-g*C)*q;b[2]=(p*w-r*v+s*u)*q;b[3]=(-l*w+o*v-m*u)*q;b[4]=(-f*E+i*z-j*y)*q;b[5]=(c*E-e*z+g*y)*q;b[6]=(-n*w+r*t-s*B)*q;b[7]=(k*w-o*t+m*B)*q;b[8]=(f*D-h*z+j*x)*q;\r
+b[9]=(-c*D+d*z-g*x)*q;b[10]=(n*v-p*t+s*A)*q;b[11]=(-k*v+l*t-m*A)*q;b[12]=(-f*C+h*y-i*x)*q;b[13]=(c*C-d*y+e*x)*q;b[14]=(-n*u+p*B-r*A)*q;b[15]=(k*u-l*B+o*A)*q;return b};mat4.toRotationMat=function(a,b){b||(b=mat4.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];b[4]=a[4];b[5]=a[5];b[6]=a[6];b[7]=a[7];b[8]=a[8];b[9]=a[9];b[10]=a[10];b[11]=a[11];b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};\r
+mat4.toMat3=function(a,b){b||(b=mat3.create());b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[4];b[4]=a[5];b[5]=a[6];b[6]=a[8];b[7]=a[9];b[8]=a[10];return b};mat4.toInverseMat3=function(a,b){var c=a[0],d=a[1],e=a[2],g=a[4],f=a[5],h=a[6],i=a[8],j=a[9],k=a[10],l=k*f-h*j,o=-k*g+h*i,m=j*g-f*i,n=c*l+d*o+e*m;if(!n)return null;n=1/n;b||(b=mat3.create());b[0]=l*n;b[1]=(-k*d+e*j)*n;b[2]=(h*d-e*f)*n;b[3]=o*n;b[4]=(k*c-e*i)*n;b[5]=(-h*c+e*g)*n;b[6]=m*n;b[7]=(-j*c+d*i)*n;b[8]=(f*c-d*g)*n;return b};\r
+mat4.multiply=function(a,b,c){c||(c=a);var d=a[0],e=a[1],g=a[2],f=a[3],h=a[4],i=a[5],j=a[6],k=a[7],l=a[8],o=a[9],m=a[10],n=a[11],p=a[12],r=a[13],s=a[14];a=a[15];var A=b[0],B=b[1],t=b[2],u=b[3],v=b[4],w=b[5],x=b[6],y=b[7],z=b[8],C=b[9],D=b[10],E=b[11],q=b[12],F=b[13],G=b[14];b=b[15];c[0]=A*d+B*h+t*l+u*p;c[1]=A*e+B*i+t*o+u*r;c[2]=A*g+B*j+t*m+u*s;c[3]=A*f+B*k+t*n+u*a;c[4]=v*d+w*h+x*l+y*p;c[5]=v*e+w*i+x*o+y*r;c[6]=v*g+w*j+x*m+y*s;c[7]=v*f+w*k+x*n+y*a;c[8]=z*d+C*h+D*l+E*p;c[9]=z*e+C*i+D*o+E*r;c[10]=z*\r
+g+C*j+D*m+E*s;c[11]=z*f+C*k+D*n+E*a;c[12]=q*d+F*h+G*l+b*p;c[13]=q*e+F*i+G*o+b*r;c[14]=q*g+F*j+G*m+b*s;c[15]=q*f+F*k+G*n+b*a;return c};mat4.multiplyVec3=function(a,b,c){c||(c=b);var d=b[0],e=b[1];b=b[2];c[0]=a[0]*d+a[4]*e+a[8]*b+a[12];c[1]=a[1]*d+a[5]*e+a[9]*b+a[13];c[2]=a[2]*d+a[6]*e+a[10]*b+a[14];return c};\r
+mat4.multiplyVec4=function(a,b,c){c||(c=b);var d=b[0],e=b[1],g=b[2];b=b[3];c[0]=a[0]*d+a[4]*e+a[8]*g+a[12]*b;c[1]=a[1]*d+a[5]*e+a[9]*g+a[13]*b;c[2]=a[2]*d+a[6]*e+a[10]*g+a[14]*b;c[3]=a[3]*d+a[7]*e+a[11]*g+a[15]*b;return c};\r
+mat4.translate=function(a,b,c){var d=b[0],e=b[1];b=b[2];if(!c||a==c){a[12]=a[0]*d+a[4]*e+a[8]*b+a[12];a[13]=a[1]*d+a[5]*e+a[9]*b+a[13];a[14]=a[2]*d+a[6]*e+a[10]*b+a[14];a[15]=a[3]*d+a[7]*e+a[11]*b+a[15];return a}var g=a[0],f=a[1],h=a[2],i=a[3],j=a[4],k=a[5],l=a[6],o=a[7],m=a[8],n=a[9],p=a[10],r=a[11];c[0]=g;c[1]=f;c[2]=h;c[3]=i;c[4]=j;c[5]=k;c[6]=l;c[7]=o;c[8]=m;c[9]=n;c[10]=p;c[11]=r;c[12]=g*d+j*e+m*b+a[12];c[13]=f*d+k*e+n*b+a[13];c[14]=h*d+l*e+p*b+a[14];c[15]=i*d+o*e+r*b+a[15];return c};\r
+mat4.scale=function(a,b,c){var d=b[0],e=b[1];b=b[2];if(!c||a==c){a[0]*=d;a[1]*=d;a[2]*=d;a[3]*=d;a[4]*=e;a[5]*=e;a[6]*=e;a[7]*=e;a[8]*=b;a[9]*=b;a[10]*=b;a[11]*=b;return a}c[0]=a[0]*d;c[1]=a[1]*d;c[2]=a[2]*d;c[3]=a[3]*d;c[4]=a[4]*e;c[5]=a[5]*e;c[6]=a[6]*e;c[7]=a[7]*e;c[8]=a[8]*b;c[9]=a[9]*b;c[10]=a[10]*b;c[11]=a[11]*b;c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15];return c};\r
+mat4.rotate=function(a,b,c,d){var e=c[0],g=c[1];c=c[2];var f=Math.sqrt(e*e+g*g+c*c);if(!f)return null;if(f!=1){f=1/f;e*=f;g*=f;c*=f}var h=Math.sin(b),i=Math.cos(b),j=1-i;b=a[0];f=a[1];var k=a[2],l=a[3],o=a[4],m=a[5],n=a[6],p=a[7],r=a[8],s=a[9],A=a[10],B=a[11],t=e*e*j+i,u=g*e*j+c*h,v=c*e*j-g*h,w=e*g*j-c*h,x=g*g*j+i,y=c*g*j+e*h,z=e*c*j+g*h;e=g*c*j-e*h;g=c*c*j+i;if(d){if(a!=d){d[12]=a[12];d[13]=a[13];d[14]=a[14];d[15]=a[15]}}else d=a;d[0]=b*t+o*u+r*v;d[1]=f*t+m*u+s*v;d[2]=k*t+n*u+A*v;d[3]=l*t+p*u+B*\r
+v;d[4]=b*w+o*x+r*y;d[5]=f*w+m*x+s*y;d[6]=k*w+n*x+A*y;d[7]=l*w+p*x+B*y;d[8]=b*z+o*e+r*g;d[9]=f*z+m*e+s*g;d[10]=k*z+n*e+A*g;d[11]=l*z+p*e+B*g;return d};mat4.rotateX=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[4],g=a[5],f=a[6],h=a[7],i=a[8],j=a[9],k=a[10],l=a[11];if(c){if(a!=c){c[0]=a[0];c[1]=a[1];c[2]=a[2];c[3]=a[3];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[4]=e*b+i*d;c[5]=g*b+j*d;c[6]=f*b+k*d;c[7]=h*b+l*d;c[8]=e*-d+i*b;c[9]=g*-d+j*b;c[10]=f*-d+k*b;c[11]=h*-d+l*b;return c};\r
+mat4.rotateY=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[0],g=a[1],f=a[2],h=a[3],i=a[8],j=a[9],k=a[10],l=a[11];if(c){if(a!=c){c[4]=a[4];c[5]=a[5];c[6]=a[6];c[7]=a[7];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[0]=e*b+i*-d;c[1]=g*b+j*-d;c[2]=f*b+k*-d;c[3]=h*b+l*-d;c[8]=e*d+i*b;c[9]=g*d+j*b;c[10]=f*d+k*b;c[11]=h*d+l*b;return c};\r
+mat4.rotateZ=function(a,b,c){var d=Math.sin(b);b=Math.cos(b);var e=a[0],g=a[1],f=a[2],h=a[3],i=a[4],j=a[5],k=a[6],l=a[7];if(c){if(a!=c){c[8]=a[8];c[9]=a[9];c[10]=a[10];c[11]=a[11];c[12]=a[12];c[13]=a[13];c[14]=a[14];c[15]=a[15]}}else c=a;c[0]=e*b+i*d;c[1]=g*b+j*d;c[2]=f*b+k*d;c[3]=h*b+l*d;c[4]=e*-d+i*b;c[5]=g*-d+j*b;c[6]=f*-d+k*b;c[7]=h*-d+l*b;return c};\r
+mat4.frustum=function(a,b,c,d,e,g,f){f||(f=mat4.create());var h=b-a,i=d-c,j=g-e;f[0]=e*2/h;f[1]=0;f[2]=0;f[3]=0;f[4]=0;f[5]=e*2/i;f[6]=0;f[7]=0;f[8]=(b+a)/h;f[9]=(d+c)/i;f[10]=-(g+e)/j;f[11]=-1;f[12]=0;f[13]=0;f[14]=-(g*e*2)/j;f[15]=0;return f};mat4.perspective=function(a,b,c,d,e){a=c*Math.tan(a*Math.PI/360);b=a*b;return mat4.frustum(-b,b,-a,a,c,d,e)};\r
+mat4.ortho=function(a,b,c,d,e,g,f){f||(f=mat4.create());var h=b-a,i=d-c,j=g-e;f[0]=2/h;f[1]=0;f[2]=0;f[3]=0;f[4]=0;f[5]=2/i;f[6]=0;f[7]=0;f[8]=0;f[9]=0;f[10]=-2/j;f[11]=0;f[12]=-(a+b)/h;f[13]=-(d+c)/i;f[14]=-(g+e)/j;f[15]=1;return f};\r
+mat4.lookAt=function(a,b,c,d){d||(d=mat4.create());var e=a[0],g=a[1];a=a[2];var f=c[0],h=c[1],i=c[2];c=b[1];var j=b[2];if(e==b[0]&&g==c&&a==j)return mat4.identity(d);var k,l,o,m;c=e-b[0];j=g-b[1];b=a-b[2];m=1/Math.sqrt(c*c+j*j+b*b);c*=m;j*=m;b*=m;k=h*b-i*j;i=i*c-f*b;f=f*j-h*c;if(m=Math.sqrt(k*k+i*i+f*f)){m=1/m;k*=m;i*=m;f*=m}else f=i=k=0;h=j*f-b*i;l=b*k-c*f;o=c*i-j*k;if(m=Math.sqrt(h*h+l*l+o*o)){m=1/m;h*=m;l*=m;o*=m}else o=l=h=0;d[0]=k;d[1]=h;d[2]=c;d[3]=0;d[4]=i;d[5]=l;d[6]=j;d[7]=0;d[8]=f;d[9]=\r
+o;d[10]=b;d[11]=0;d[12]=-(k*e+i*g+f*a);d[13]=-(h*e+l*g+o*a);d[14]=-(c*e+j*g+b*a);d[15]=1;return d};mat4.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+", "+a[4]+", "+a[5]+", "+a[6]+", "+a[7]+", "+a[8]+", "+a[9]+", "+a[10]+", "+a[11]+", "+a[12]+", "+a[13]+", "+a[14]+", "+a[15]+"]"};quat4={};quat4.create=function(a){var b=new glMatrixArrayType(4);if(a){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3]}return b};quat4.set=function(a,b){b[0]=a[0];b[1]=a[1];b[2]=a[2];b[3]=a[3];return b};\r
+quat4.calculateW=function(a,b){var c=a[0],d=a[1],e=a[2];if(!b||a==b){a[3]=-Math.sqrt(Math.abs(1-c*c-d*d-e*e));return a}b[0]=c;b[1]=d;b[2]=e;b[3]=-Math.sqrt(Math.abs(1-c*c-d*d-e*e));return b};quat4.inverse=function(a,b){if(!b||a==b){a[0]*=1;a[1]*=1;a[2]*=1;return a}b[0]=-a[0];b[1]=-a[1];b[2]=-a[2];b[3]=a[3];return b};quat4.length=function(a){var b=a[0],c=a[1],d=a[2];a=a[3];return Math.sqrt(b*b+c*c+d*d+a*a)};\r
+quat4.normalize=function(a,b){b||(b=a);var c=a[0],d=a[1],e=a[2],g=a[3],f=Math.sqrt(c*c+d*d+e*e+g*g);if(f==0){b[0]=0;b[1]=0;b[2]=0;b[3]=0;return b}f=1/f;b[0]=c*f;b[1]=d*f;b[2]=e*f;b[3]=g*f;return b};quat4.multiply=function(a,b,c){c||(c=a);var d=a[0],e=a[1],g=a[2];a=a[3];var f=b[0],h=b[1],i=b[2];b=b[3];c[0]=d*b+a*f+e*i-g*h;c[1]=e*b+a*h+g*f-d*i;c[2]=g*b+a*i+d*h-e*f;c[3]=a*b-d*f-e*h-g*i;return c};\r
+quat4.multiplyVec3=function(a,b,c){c||(c=b);var d=b[0],e=b[1],g=b[2];b=a[0];var f=a[1],h=a[2];a=a[3];var i=a*d+f*g-h*e,j=a*e+h*d-b*g,k=a*g+b*e-f*d;d=-b*d-f*e-h*g;c[0]=i*a+d*-b+j*-h-k*-f;c[1]=j*a+d*-f+k*-b-i*-h;c[2]=k*a+d*-h+i*-f-j*-b;return c};quat4.toMat3=function(a,b){b||(b=mat3.create());var c=a[0],d=a[1],e=a[2],g=a[3],f=c+c,h=d+d,i=e+e,j=c*f,k=c*h;c=c*i;var l=d*h;d=d*i;e=e*i;f=g*f;h=g*h;g=g*i;b[0]=1-(l+e);b[1]=k-g;b[2]=c+h;b[3]=k+g;b[4]=1-(j+e);b[5]=d-f;b[6]=c-h;b[7]=d+f;b[8]=1-(j+l);return b};\r
+quat4.toMat4=function(a,b){b||(b=mat4.create());var c=a[0],d=a[1],e=a[2],g=a[3],f=c+c,h=d+d,i=e+e,j=c*f,k=c*h;c=c*i;var l=d*h;d=d*i;e=e*i;f=g*f;h=g*h;g=g*i;b[0]=1-(l+e);b[1]=k-g;b[2]=c+h;b[3]=0;b[4]=k+g;b[5]=1-(j+e);b[6]=d-f;b[7]=0;b[8]=c-h;b[9]=d+f;b[10]=1-(j+l);b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return b};quat4.slerp=function(a,b,c,d){d||(d=a);var e=c;if(a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3]<0)e=-1*c;d[0]=1-c*a[0]+e*b[0];d[1]=1-c*a[1]+e*b[1];d[2]=1-c*a[2]+e*b[2];d[3]=1-c*a[3]+e*b[3];return d};\r
+quat4.str=function(a){return"["+a[0]+", "+a[1]+", "+a[2]+", "+a[3]+"]"};\r
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-management/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index b412190d9173fb1959c920045fdae08129fe27d8..0339c5377824da423a23a43f4989dd4936742e69 100644 (file)
@@ -447,7 +447,7 @@ EXHIBIT A -Mozilla Public License.
      The Original Code is RabbitMQ Management Plugin.
 
      The Initial Developer of the Original Code is GoPivotal, Inc.
-     Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.''
+     Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.''
 
      [NOTE: The text of this Exhibit A may differ slightly from the text of
      the notices in the Source Code files of the Original Code. You should
index 6f4cb6c095e1ca14fd675b37f8f8e049681e2fb5..f8f2da51aee06b95be04c73946dfdbc05de2bb78 100755 (executable)
 #   The Original Code is RabbitMQ Management Plugin.
 #
 #   The Initial Developer of the Original Code is GoPivotal, Inc.
-#   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+#   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 
 import sys
-if sys.version_info[0] < 2 or sys.version_info[1] < 6:
-    print "Sorry, rabbitmqadmin requires at least Python 2.6."
+if sys.version_info[0] < 2 or (sys.version_info[0] == 2 and sys.version_info[1] < 6):
+    print("Sorry, rabbitmqadmin requires at least Python 2.6.")
     sys.exit(1)
 
-from ConfigParser import ConfigParser, NoSectionError
 from optparse import OptionParser, TitledHelpFormatter
-import httplib
 import urllib
-import urlparse
 import base64
 import json
 import os
 import socket
 
+if sys.version_info[0] == 2:
+    from ConfigParser import ConfigParser, NoSectionError
+    import httplib
+    import urlparse
+    from urllib import quote_plus
+    def b64(s):
+        return base64.b64encode(s)
+else:
+    from configparser import ConfigParser, NoSectionError
+    import http.client as httplib
+    import urllib.parse as urlparse
+    from urllib.parse import quote_plus
+    def b64(s):
+        return base64.b64encode(s.encode('utf-8')).decode('utf-8')
+
 VERSION = '%%VSN%%'
 
-LISTABLE = {'connections': {'vhost': False},
-            'channels':    {'vhost': False},
-            'exchanges':   {'vhost': True},
-            'queues':      {'vhost': True},
-            'bindings':    {'vhost': True},
+LISTABLE = {'connections': {'vhost': False, 'cols': ['name','user','channels']},
+            'channels':    {'vhost': False, 'cols': ['name', 'user']},
+            'consumers':   {'vhost': True},
+            'exchanges':   {'vhost': True,  'cols': ['name', 'type']},
+            'queues':      {'vhost': True,  'cols': ['name', 'messages']},
+            'bindings':    {'vhost': True,  'cols': ['source', 'destination',
+                                                     'routing_key']},
             'users':       {'vhost': False},
-            'vhosts':      {'vhost': False},
+            'vhosts':      {'vhost': False, 'cols': ['name', 'messages']},
             'permissions': {'vhost': False},
-            'nodes':       {'vhost': False},
-            'parameters':  {'vhost': False,
-                            'json':  ['value']},
-            'policies':    {'vhost': False,
-                            'json':  ['definition']}}
+            'nodes':       {'vhost': False, 'cols': ['name','type','mem_used']},
+            'parameters':  {'vhost': False, 'json': ['value']},
+            'policies':    {'vhost': False, 'json': ['definition']}}
 
-SHOWABLE = {'overview': {'vhost': False}}
+SHOWABLE = {'overview': {'vhost': False, 'cols': ['rabbitmq_version',
+                                                  'cluster_name',
+                                                  'queue_totals.messages',
+                                                  'object_totals.queues']}}
 
 PROMOTE_COLUMNS = ['vhost', 'name', 'type',
                    'source', 'destination', 'destination_type', 'routing_key']
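
The shim above is what lets rabbitmqadmin run on both Python 2.6+ and Python 3: module renames are aliased at import time, and b64() hides the bytes/str difference around base64 (the new 'cols' entries additionally give list/show sensible default columns when none are requested). A stand-alone sketch of the b64 idea; the guest:guest credentials are purely illustrative:

    import base64, sys

    def b64(s):
        if sys.version_info[0] == 2:
            # Python 2: str is already a byte string, base64 takes it directly.
            return base64.b64encode(s)
        # Python 3: encode to bytes first, then decode the result back to str.
        return base64.b64encode(s.encode('utf-8')).decode('utf-8')

    print("Authorization: Basic " + b64("guest:guest"))
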
@@ -118,8 +133,10 @@ PURGABLE = {
 EXTRA_VERBS = {
     'publish': {'mandatory': ['routing_key'],
                 'optional':  {'payload': None,
+                              'properties': {},
                               'exchange': 'amq.default',
                               'payload_encoding': 'string'},
+                'json':      ['properties'],
                 'uri':       '/exchanges/{vhost}/{exchange}/publish'},
     'get':     {'mandatory': ['queue'],
                 'optional':  {'count': '1', 'requeue': 'true',
@@ -341,7 +358,7 @@ def make_configuration():
         try:
             config.read(options.config)
             new_conf = dict(config.items(options.node))
-        except NoSectionError, error:
+        except NoSectionError as error:
             if options.node == "default":
                 pass
             else:
@@ -384,14 +401,14 @@ def main():
     method()
 
 def output(s):
-    print maybe_utf8(s, sys.stdout)
+    print(maybe_utf8(s, sys.stdout))
 
 def die(s):
     sys.stderr.write(maybe_utf8("*** {0}\n".format(s), sys.stderr))
     exit(1)
 
 def maybe_utf8(s, stream):
-    if stream.isatty():
+    if sys.version_info[0] == 3 or stream.isatty():
         # It will have an encoding, which Python will respect
         return s
     else:
@@ -424,14 +441,14 @@ class Management:
         else:
             conn = httplib.HTTPConnection(self.options.hostname,
                                           self.options.port)
-        headers = {"Authorization":
-                       "Basic " + base64.b64encode(self.options.username + ":" +
-                                                   self.options.password)}
+        auth = (self.options.username + ":" + self.options.password)
+
+        headers = {"Authorization": "Basic " + b64(auth)}
         if body != "":
             headers["Content-Type"] = "application/json"
         try:
             conn.request(method, path, body, headers)
-        except socket.error, e:
+        except socket.error as e:
             die("Could not connect: {0}".format(e))
         resp = conn.getresponse()
         if resp.status == 400:
@@ -449,7 +466,7 @@ class Management:
         if resp.status < 200 or resp.status > 400:
             raise Exception("Received %d %s for path %s\n%s"
                             % (resp.status, resp.reason, path, resp.read()))
-        return resp.read()
+        return resp.read().decode('utf-8')
 
     def verbose(self, string):
         if self.options.verbose:
@@ -459,6 +476,11 @@ class Management:
         assert_usage(len(self.args) == 1, 'Exactly one argument required')
         return self.args[0]
 
+    def use_cols(self):
+        # Deliberately do not cast to int here; we only care about the
+        # default, not explicit setting.
+        return self.options.depth == 1 and not 'json' in self.options.format
+
     def invoke_help(self):
         if len(self.args) == 0:
             parser.print_help()
@@ -472,15 +494,14 @@ class Management:
                 assert_usage(False, """help topic must be one of:
   subcommands
   config""")
-            print usage
+            print(usage)
         exit(0)
 
     def invoke_publish(self):
         (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish'])
-        upload['properties'] = {} # TODO do we care here?
         if not 'payload' in upload:
             data = sys.stdin.read()
-            upload['payload'] = base64.b64encode(data)
+            upload['payload'] = b64(data)
             upload['payload_encoding'] = 'base64'
         resp = json.loads(self.post(uri, json.dumps(upload)))
         if resp['routed']:
@@ -521,16 +542,14 @@ class Management:
                      % (self.options.hostname, path))
 
     def invoke_list(self):
-        cols = self.args[1:]
-        (uri, obj_info) = self.list_show_uri(LISTABLE, 'list', cols)
+        (uri, obj_info, cols) = self.list_show_uri(LISTABLE, 'list')
         format_list(self.get(uri), cols, obj_info, self.options)
 
     def invoke_show(self):
-        cols = self.args[1:]
-        (uri, obj_info) = self.list_show_uri(SHOWABLE, 'show', cols)
+        (uri, obj_info, cols) = self.list_show_uri(SHOWABLE, 'show')
         format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options)
 
-    def list_show_uri(self, obj_types, verb, cols):
+    def list_show_uri(self, obj_types, verb):
         obj_type = self.args[0]
         assert_usage(obj_type in obj_types,
                      "Don't know how to {0} {1}".format(verb, obj_type))
@@ -538,7 +557,10 @@ class Management:
         uri = "/%s" % obj_type
         query = []
         if obj_info['vhost'] and self.options.vhost:
-            uri += "/%s" % urllib.quote_plus(self.options.vhost)
+            uri += "/%s" % quote_plus(self.options.vhost)
+        cols = self.args[1:]
+        if cols == [] and 'cols' in obj_info and self.use_cols():
+            cols = obj_info['cols']
         if cols != []:
             query.append("columns=" + ",".join(cols))
         sort = self.options.sort
@@ -549,7 +571,7 @@ class Management:
         query = "&".join(query)
         if query != "":
             uri += "?" + query
-        return (uri, obj_info)
+        return (uri, obj_info, cols)
 
     def invoke_declare(self):
         (obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE)
@@ -589,7 +611,7 @@ class Management:
         uri_template = obj['uri']
         upload = {}
         for k in optional.keys():
-            if optional[k]:
+            if optional[k] is not None:
                 upload[k] = optional[k]
         for arg in args:
             assert_usage("=" in arg,
@@ -609,8 +631,8 @@ class Management:
         uri_args = {}
         for k in upload:
             v = upload[k]
-            if v and isinstance(v, basestring):
-                uri_args[k] = urllib.quote_plus(v)
+            if v and isinstance(v, (str, bytes)):
+                uri_args[k] = quote_plus(v)
                 if k == 'destination_type':
                     uri_args['destination_char'] = v[0]
         uri = uri_template.format(**uri_args)
@@ -620,7 +642,7 @@ class Management:
         try:
             return json.loads(text)
         except ValueError:
-            print "Could not parse JSON:\n  {0}".format(text)
+            print("Could not parse JSON:\n  {0}".format(text))
             sys.exit(1)
 
 def format_list(json_list, columns, args, options):
@@ -646,7 +668,7 @@ class Lister:
             output(string)
 
     def display(self, json_list):
-        depth = sys.maxint
+        depth = sys.maxsize
         if len(self.columns) == 0:
             depth = int(self.options.depth)
         (columns, table) = self.list_to_table(json.loads(json_list), depth)
@@ -666,7 +688,7 @@ class Lister:
                 column = prefix == '' and key or (prefix + '.' + key)
                 subitem = item[key]
                 if type(subitem) == dict:
-                    if self.obj_info.has_key('json') and key in self.obj_info['json']:
+                    if 'json' in self.obj_info and key in self.obj_info['json']:
                         fun(column, json.dumps(subitem))
                     else:
                         if depth < max_depth:
@@ -676,7 +698,7 @@ class Lister:
                     # mind (which come out looking decent); the second
                     # one has applications in nodes (which look less
                     # so, but what would look good?).
-                    if [x for x in subitem if type(x) != unicode] == []:
+                    if [x for x in subitem if type(x) != str] == []:
                         serialised = " ".join(subitem)
                     else:
                         serialised = json.dumps(subitem)
@@ -689,17 +711,17 @@ class Lister:
 
         def add_to_row(col, val):
             if col in column_ix:
-                row[column_ix[col]] = unicode(val)
+                row[column_ix[col]] = str(val)
 
         if len(self.columns) == 0:
             for item in items:
                 add('', 1, item, add_to_columns)
-            columns = columns.keys()
+            columns = list(columns.keys())
             columns.sort(key=column_sort_key)
         else:
             columns = self.columns
 
-        for i in xrange(0, len(columns)):
+        for i in range(0, len(columns)):
             column_ix[columns[i]] = i
         for item in items:
             row = len(columns) * ['']
@@ -733,10 +755,10 @@ class LongList(Lister):
         max_width = 0
         for col in columns:
             max_width = max(max_width, len(col))
-        fmt = "{0:>" + unicode(max_width) + "}: {1}"
+        fmt = "{0:>" + str(max_width) + "}: {1}"
         output(sep)
-        for i in xrange(0, len(table)):
-            for j in xrange(0, len(columns)):
+        for i in range(0, len(table)):
+            for j in range(0, len(columns)):
                 output(fmt.format(columns[j], table[i][j]))
             output(sep)
 
@@ -754,8 +776,8 @@ class TableList(Lister):
     def ascii_table(self, rows):
         table = ""
         col_widths = [0] * len(rows[0])
-        for i in xrange(0, len(rows[0])):
-            for j in xrange(0, len(rows)):
+        for i in range(0, len(rows[0])):
+            for j in range(0, len(rows)):
                 col_widths[i] = max(col_widths[i], len(rows[j][i]))
         self.ascii_bar(col_widths)
         self.ascii_row(col_widths, rows[0], "^")
@@ -766,8 +788,8 @@ class TableList(Lister):
 
     def ascii_row(self, col_widths, row, align):
         txt = "|"
-        for i in xrange(0, len(col_widths)):
-            fmt = " {0:" + align + unicode(col_widths[i]) + "} "
+        for i in range(0, len(col_widths)):
+            fmt = " {0:" + align + str(col_widths[i]) + "} "
             txt += fmt.format(row[i]) + "|"
         output(txt)
 
@@ -784,9 +806,9 @@ class KeyValueList(Lister):
         self.options = options
 
     def display_list(self, columns, table):
-        for i in xrange(0, len(table)):
+        for i in range(0, len(table)):
             row = []
-            for j in xrange(0, len(columns)):
+            for j in range(0, len(columns)):
                 row.append("{0}=\"{1}\"".format(columns[j], table[i][j]))
             output(" ".join(row))
 
@@ -799,7 +821,7 @@ class BashList(Lister):
 
     def display_list(self, columns, table):
         ix = None
-        for i in xrange(0, len(columns)):
+        for i in range(0, len(columns)):
             if columns[i] == 'name':
                 ix = i
         if ix is not None:
index f1ed238d4327ef2ab2d983027fd118a2b37cd309..6b9bbe24d422075704071ff7836a03ca52137bcf 100644 (file)
@@ -5,6 +5,8 @@
                          %% List of {MaxAgeSecs, IfTimestampDivisibleBySecs}
                          [{global,   [{10000000000000, 1}]},
                           {basic,    [{10000000000000, 1}]},
-                          {detailed, [{10000000000000, 1}]}]}
+                          {detailed, [{10000000000000, 1}]}]},
+                        %% We're going to test this, so enable it!
+                        {rates_mode, detailed}
                        ]}
 ].
index d9f342ebbc04daf52b659f2536adb48a078a0018..43cc67dcb8bfa3d7c80c6a4db21f98482a7f067e 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -record(context, {user, password = none}).
index 16e5c584b04ff96dff3e9918399b333f08ec7bcc..3d0817a940a813fb0fafcfd5de71749bdd703389 100644 (file)
@@ -5,7 +5,7 @@ COVER:=false
 WITH_BROKER_TEST_COMMANDS:=rabbit_test_runner:run_in_broker(\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\")
 WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/etc/rabbit-test
 STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),\"/tmp/rabbitmq-multi-node/plugins\")
-WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/rabbitmqadmin-test.py
+WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/rabbitmqadmin-test-wrapper.sh
 
 CONSTRUCT_APP_PREREQS:=$(shell find $(PACKAGE_DIR)/priv -type f) $(PACKAGE_DIR)/bin/rabbitmqadmin
 define construct_app_commands
index 4a547e5dc1ce9d1581f22f9d3d270f6055c489df..a509a697b1f2e468dd3c9caaad819ef0f3059e88 100644 (file)
@@ -202,7 +202,9 @@ Content-Length: 0</pre>
         <td class="path">/api/nodes/<i>name</i></td>
         <td>
           An individual node in the RabbitMQ cluster. Add
-          "?memory=true" to get memory statistics.
+          "?memory=true" to get memory statistics, and "?binary=true"
+          to get a breakdown of binary memory use (may be expensive if
+          there are many small binaries in the system).
         </td>
       </tr>
       <tr>
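
A quick sketch of requesting those optional breakdowns; the host, credentials and node name (rabbit@localhost) are placeholders for whatever your installation uses:

    # Sketch: fetch the memory and binary breakdowns for one node.
    import base64, json, urllib.request

    url = ("http://localhost:15672/api/nodes/rabbit%40localhost"
           "?memory=true&binary=true")
    req = urllib.request.Request(url)
    req.add_header("Authorization",
                   "Basic " + base64.b64encode(b"guest:guest").decode("ascii"))
    node = json.load(urllib.request.urlopen(req))
    print(node.get("memory"), node.get("binary"))
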
@@ -227,11 +229,24 @@ Content-Length: 0</pre>
           messages. POST to upload an existing set of definitions. Note
           that:
           <ul>
-            <li>The definitions are merged. Anything already existing is
-            untouched.</li>
-            <li>Conflicts will cause an error.</li>
-            <li>In the event of an error you will be left with a
-            part-applied set of definitions.</li>
+            <li>
+              The definitions are merged. Anything already existing on
+              the server but not in the uploaded definitions is
+              untouched.
+            </li>
+            <li>
+              Conflicting definitions on immutable objects (exchanges,
+              queues and bindings) will cause an error.
+            </li>
+            <li>
+              Conflicting definitions on mutable objects will cause
+              the object in the server to be overwritten with the
+              object from the definitions.
+            </li>
+            <li>
+              In the event of an error you will be left with a
+              part-applied set of definitions.
+            </li>
           </ul>
           For convenience you may upload a file from a browser to this
           URI (i.e. you can use <code>multipart/form-data</code> as
@@ -286,6 +301,22 @@ Content-Length: 0</pre>
         <td class="path">/api/channels/<i>channel</i></td>
         <td>Details about an individual channel.</td>
       </tr>
+      <tr>
+        <td>X</td>
+        <td></td>
+        <td></td>
+        <td></td>
+        <td class="path">/api/consumers</td>
+        <td>A list of all consumers.</td>
+      </tr>
+      <tr>
+        <td>X</td>
+        <td></td>
+        <td></td>
+        <td></td>
+        <td class="path">/api/consumers/<i>vhost</i></td>
+        <td>A list of all consumers in a given virtual host.</td>
+      </tr>
       <tr>
         <td>X</td>
         <td></td>
@@ -308,9 +339,17 @@ Content-Length: 0</pre>
         <td>X</td>
         <td></td>
         <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i></td>
-        <td>An individual exchange. To PUT an exchange, you will need a body looking something like this:
-<pre>{"type":"direct","auto_delete":false,"durable":true,"internal":false,"arguments":[]}</pre>
-        The <code>type</code> key is mandatory; other keys are optional.</td>
+        <td>
+          An individual exchange. To PUT an exchange, you will need a body looking something like this:
+          <pre>{"type":"direct","auto_delete":false,"durable":true,"internal":false,"arguments":{}}</pre>
+          The <code>type</code> key is mandatory; other keys are optional.
+          <p>
+            When DELETEing an exchange you can add the query string
+            parameter <code>if-unused=true</code>. This prevents the
+            delete from succeeding if the exchange is bound to a queue
+            or as a source to another exchange.
+          </p>
+        </td>
       </tr>
       <tr>
         <td>X</td>
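
A minimal sketch of the calls described above, assuming the default vhost "/" (%2f in the URL), a local broker on port 15672 and guest/guest credentials; the exchange name is a placeholder:

    # Sketch: declare an exchange, then delete it only if it is unused.
    import base64, http.client, json

    conn = http.client.HTTPConnection("localhost", 15672)
    auth = "Basic " + base64.b64encode(b"guest:guest").decode("ascii")
    headers = {"Authorization": auth, "Content-Type": "application/json"}

    body = json.dumps({"type": "direct", "auto_delete": False, "durable": True,
                       "internal": False, "arguments": {}})
    conn.request("PUT", "/api/exchanges/%2f/my-exchange", body, headers)
    resp = conn.getresponse(); resp.read()   # drain before reusing the connection
    print(resp.status)                       # typically 204 on success

    conn.request("DELETE", "/api/exchanges/%2f/my-exchange?if-unused=true",
                 headers=headers)
    print(conn.getresponse().status)
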
@@ -348,10 +387,13 @@ Content-Length: 0</pre>
           <pre>{"routed": true}</pre>
           <code>routed</code> will be true if the message was sent to
           at least one queue.
-          <p>Please note that the publish / get paths in the HTTP API are
-          intended for injecting test messages, diagnostics etc - they do not
-          implement reliable delivery and so should be treated as a sysadmin's
-          tool rather than a general API for messaging.</p>
+          <p>
+            Please note that the HTTP API is not ideal for high
+            performance publishing; the need to create a new TCP
+            connection for each message published can limit message
+            throughput compared to AMQP or other protocols using
+            long-lived connections.
+          </p>
         </td>
       </tr>
       <tr>
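
The publish call in the row above, sketched with the same defaults the rabbitmqadmin changes earlier in this review use (amq.default exchange, empty properties); routing key, host and credentials are placeholders:

    # Sketch: publish one test message over the HTTP API and check "routed".
    import base64, http.client, json

    conn = http.client.HTTPConnection("localhost", 15672)
    auth = "Basic " + base64.b64encode(b"guest:guest").decode("ascii")
    headers = {"Authorization": auth, "Content-Type": "application/json"}
    msg = {"routing_key": "my-queue", "payload": "hello",
           "payload_encoding": "string", "properties": {}}
    conn.request("POST", "/api/exchanges/%2f/amq.default/publish",
                 json.dumps(msg), headers)
    print(json.loads(conn.getresponse().read()))  # e.g. {'routed': True}
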
@@ -376,9 +418,18 @@ Content-Length: 0</pre>
         <td>X</td>
         <td></td>
         <td class="path">/api/queues/<i>vhost</i>/<i>name</i></td>
-        <td>An individual queue. To PUT a queue, you will need a body looking something like this:
-<pre>{"auto_delete":false,"durable":true,"arguments":[],"node":"rabbit@smacmullen"}</pre>
-        All keys are optional.</td>
+        <td>
+          An individual queue. To PUT a queue, you will need a body looking something like this:
+          <pre>{"auto_delete":false,"durable":true,"arguments":{},"node":"rabbit@smacmullen"}</pre>
+          All keys are optional.
+          <p>
+            When DELETEing a queue you can add the query string
+            parameters <code>if-empty=true</code> and /
+            or <code>if-unused=true</code>. These prevent the delete
+            from succeeding if the queue contains messages, or has
+            consumers, respectively.
+          </p>
+        </td>
       </tr>
       <tr>
         <td>X</td>
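A matching sketch for the queue endpoint described above, under the same assumptions (default listener, guest/guest, requests library, placeholder names). All keys in the PUT body are optional, and the conditional delete uses the if-empty / if-unused query parameters from the hunk.

    import requests

    base = "http://localhost:15672/api"
    auth = ("guest", "guest")

    # Declare a durable queue in the default vhost.
    requests.put(base + "/queues/%2f/my-queue",
                 json={"auto_delete": False, "durable": True, "arguments": {}},
                 auth=auth)

    # Delete it only if it has no messages and no consumers.
    requests.delete(base + "/queues/%2f/my-queue",
                    params={"if-empty": "true", "if-unused": "true"},
                    auth=auth)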
@@ -435,10 +486,12 @@ Content-Length: 0</pre>
             message payload if it is larger than the size given (in bytes).</li>
           </ul>
           <p><code>truncate</code> is optional; all other keys are mandatory.</p>
-          <p>Please note that the publish / get paths in the HTTP API are
-          intended for injecting test messages, diagnostics etc - they do not
-          implement reliable delivery and so should be treated as a sysadmin's
-          tool rather than a general API for messaging.</p>
+          <p>
+            Please note that the get path in the HTTP API is intended
+            for diagnostics etc - it does not implement reliable
+            delivery and so should be treated as a sysadmin's tool
+            rather than a general API for messaging.
+          </p>
         </td>
       </tr>
       <tr>
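For the get path discussed above, a hedged sketch: only the truncate key appears in this excerpt, so the remaining body keys (count, requeue, encoding) are assumptions based on the documentation of this release line rather than anything shown here, as are the response field names.

    import requests

    body = {"count": 5, "requeue": True, "encoding": "auto", "truncate": 50000}
    r = requests.post("http://localhost:15672/api/queues/%2f/my-queue/get",
                      json=body, auth=("guest", "guest"))
    for m in r.json():
        # Each returned message carries its routing key and (possibly
        # truncated) payload, among other fields.
        print(m["routing_key"], m["payload"])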
@@ -467,7 +520,7 @@ Content-Length: 0</pre>
         queue. Remember, an exchange and a queue can be bound
         together many times! To create a new binding, POST to this
         URI. You will need a body looking something like this:
-          <pre>{"routing_key":"my_routing_key","arguments":[]}</pre>
+          <pre>{"routing_key":"my_routing_key","arguments":{}}</pre>
           All keys are optional.
           The response will contain a <code>Location</code> header
           telling you the URI of your new binding.
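A sketch of creating a binding as described above and reading the Location header from the response. The binding path (/api/bindings/&lt;vhost&gt;/e/&lt;exchange&gt;/q/&lt;queue&gt;) sits above this excerpt, so it is repeated here as an assumption; the request body matches the example in the hunk.

    import requests

    r = requests.post(
        "http://localhost:15672/api/bindings/%2f/e/my-exchange/q/my-queue",
        json={"routing_key": "my_routing_key", "arguments": {}},
        auth=("guest", "guest"))
    # The Location header gives the URI of the newly created binding.
    print(r.headers.get("Location"))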
index 43e68c563839a28c4c97901e99461de414b5b279..74a321d07f58ec45597e6de98eedbca8e48602e3 100644 (file)
@@ -40,11 +40,24 @@ div.box, div.section, div.section-hidden { overflow: auto; width: 100%; }
 .right { float: right; }
 .clear { clear: both; }
 
-.help, .rate-options { color: #888; cursor: pointer; }
-.help:hover, .rate-options:hover { color: #444; }
+.help, .popup-options-link { color: #888; cursor: pointer; }
+.help:hover, .popup-options-link:hover { color: #444; }
 
-.tag-link { color: #444; cursor: pointer; }
-.tag-link:hover { color: #888; }
+.rate-visibility-option { cursor: pointer; padding: 4px; background: #fafafa; border: 1px solid #f0f0f0; border-radius: 3px; display:block; }
+.rate-visibility-option:hover { background: #ddf;
+                                background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #ddf),color-stop(1, #bbf));
+                                border: 1px solid #88d;
+                                border-radius: 3px; }
+
+.rate-visibility-option-hidden { text-decoration: line-through; color: #888; }
+
+
+table.legend { float: left; }
+table.legend th { padding: 4px 10px 4px 0; width: 80px; }
+table.legend td { padding: 4px 0 4px 10px; width: 130px; }
+
+.tag-link, .argument-link { color: #444; cursor: pointer; }
+.tag-link:hover, .argument-link:hover { color: #888; }
 
 .filter { overflow: auto; width: 100%; margin-bottom: 10px; }
 .filter table { float: left; }
@@ -59,18 +72,23 @@ input#truncate { width: 50px; text-align: right; }
 
 table { border-collapse: collapse; }
 table th { font-weight: normal; color: black; }
-table th, table td { font: 12px/17px Verdana,sans-serif; padding: 4px; }
+table th, table td { font: 12px Verdana,sans-serif; padding: 5px 4px; }
 table.list th, table.list td { vertical-align: top; min-width: 5em; width: auto; }
 
-table.list { border-width: 1px; border-bottom: 1px solid #ccc; margin-bottom: 1em; }
-table.list th, table.list td { border-left: 1px solid #ccc; border-right: 1px solid #ccc; }
-table.list th { text-align: center; border-top: 1px solid #ccc; border-bottom: 1px solid #ccc; }
+table.list { border-width: 1px; margin-bottom: 1em; }
+table.list th, table.list td { border: 1px solid #ccc; }
+table.list th { text-align: center; }
+table.list th.plus-minus { border: none; min-width: 2em; }
 table.list td a { display: block; width: 100%; }
 table.list th a.sort { display: block; width: 100%; cursor: pointer; }
 table.list th a.sort .arrow { color: #888; }
 table.list td p { margin: 0; padding: 1px 0 0 0; }
 table.list td p.warning { margin: 0; padding: 5px; }
 
+table.list td.plain, table.list td.plain td, table.list td.plain th { border: none; background: none; }
+table.list th.plain { border-left: none; border-top: none; border-right: none; background: none; }
+table.list th.plain h3 { margin: 0; border: 0; }
+
 #main .internal-purpose, #main .internal-purpose * { color: #aaa; }
 
 div.section table.list, div.section-hidden table.list { margin-bottom: 0; }
@@ -81,33 +99,37 @@ div.colour-key { float: left; width: 10px; height: 10px; margin: 3px 5px 0 0;}
 div.memory-info { float: left; padding: 10px 10px 0 0; }
 button.memory-button { margin-top: 10px; }
 
-div.memory_connection_procs { background: #955300; }
-div.memory_queue_procs      { background: #da7900; }
-div.memory_plugins          { background: #ffc884; }
-div.memory_other_proc       { background: #fff4e7; }
-div.memory_mnesia           { background: #005395; }
-div.memory_msg_index        { background: #0079da; }
-div.memory_mgmt_db          { background: #84c8ff; }
-div.memory_other_ets        { background: #e7f4ff; }
-div.memory_binary           { background: #666; }
-div.memory_code             { background: #999; }
-div.memory_atom             { background: #bbb; }
-div.memory_other_system     { background: #ddd; }
+div.memory_queue  { background: #bd4688; }
+div.memory_binary { background: url(../img/bg-binary.png); }
+div.memory_conn   { background: #dada66; }
+div.memory_proc   { background: #6abf59; }
+div.memory_table  { background: #6679da; }
+div.memory_system { background: #999; }
+
+div.memory-bar div.memory_queue  { border-right: solid 1px #eb50a6; }
+div.memory-bar div.memory_binary { border-right: solid 1px #eb50a6; }
+div.memory-bar div.memory_conn   { border-right: solid 1px #ebeb8d; }
+div.memory-bar div.memory_proc   { border-right: solid 1px #79da66; }
+div.memory-bar div.memory_table  { border-right: solid 1px #8d9ceb; }
+div.memory-bar div.memory_system { border-right: solid 1px #bbb; }
 
 sub { display: block; font-size: 0.8em; color: #888; }
 small { font-size: 0.8em; color: #888; }
 #main sub a { color: #888; }
 #main sub a:hover { color: #444; }
+table.argument-links { color: #888; }
+table.argument-links td { font-size: 0.64em; vertical-align: top; }
 .unknown { color: #888; }
 
-table.facts { float: left; margin-right: 50px; }
-table.facts th { color: black; text-align: right; border-right: 1px solid #ccc; }
+table.facts { float: left; }
+table.facts th, table.legend th { color: black; text-align: right; border-right: 1px solid #ccc; }
 table.facts th, table.facts td { vertical-align: top; padding: 0 10px 10px 10px; }
+table.facts th.horizontal { border-right: none; padding: 0 10px 5px 10px; }
 
 table.facts-long th { text-align: right; font-weight: bold; }
 table.facts-long th, table.facts-long td { vertical-align: top; }
 
-table.facts-fixed-width th, table.facts-fixed-width td { width: 130px; }
+table.facts-l { margin-right: 50px; }
 
 table.mini th { border: none; padding: 0 2px 2px 2px; text-align: right; }
 table.mini td { border: none; padding: 0 2px 2px 2px; }
@@ -144,9 +166,9 @@ p.warning, div.form-popup-warn { background: #ff8; }
 div.form-popup-info { background: #8f8; }
 div.form-popup-help { text-align: left !important; background: #f8f8f8; border: 1px solid #ccc; }
 div.form-popup-warn, div.form-popup-info, div.form-popup-help { margin: 20px; padding: 15px; border-radius: 10px; -moz-border-radius: 10px; text-align: center; max-width: 600px; z-index: 1; display: none; position: fixed; min-width: 500px; }
-div.form-popup-warn span, div.form-popup-info span, div.form-popup-help span, div.form-popup-rate-options span { color: black; font-weight: bold; cursor: pointer; }
+div.form-popup-warn span, div.form-popup-info span, div.form-popup-help span, div.form-popup-options span { color: black; font-weight: bold; cursor: pointer; }
 
-div.form-popup-rate-options {
+div.form-popup-options {
     z-index: 1; position: absolute; right: 35px; padding: 15px; background: white; border-left: 1px solid #ccc; border-top: 1px solid #ccc; border-bottom: 1px solid #ccc; border-radius: 10px 0 0 10px; -moz-border-radius: 10px 0 0 10px;
 }
 
@@ -163,8 +185,6 @@ p.status-error, p.warning { margin: 20px; padding: 15px; border-radius: 10px; -m
 .chart-medium { width: 600px; height: 200px; }
 .chart-large  { width: 800px; height: 300px; }
 
-.chart-legend { float: left; }
-
 .micro-highlight { min-width: 120px; font-size: 100%; text-align:center; padding:10px; background-color: #ddd; margin: 0 20px 0 0; color: #888; border-radius: 10px; -moz-border-radius: 10px; }
 .micro-highlight a { font-weight: normal !important; color: #888 !important; }
 .micro-highlight strong { font-size: 120%; color: #444; font-weight: normal; }
@@ -205,7 +225,7 @@ table.form table.subform th, table.form table.subform td { padding: 0; }
 
 .multifield-sub { border: 1px solid #ddd; background: #f8f8f8; padding: 10px; border-radius: 10px; -moz-border-radius: 10px; float: left; margin-bottom: 10px; }
 
-label.radio { padding: 5px; border: 1px solid #eee; cursor: pointer; border-radius: 5px; -moz-border-radius: 5px; }
+label.radio, label.checkbox { padding: 5px; border: 1px solid #eee; cursor: pointer; border-radius: 5px; -moz-border-radius: 5px; }
 
 table.two-col-layout { width: 100%; }
 table.two-col-layout > tbody > tr > td { width: 50%; vertical-align: top; }
@@ -232,6 +252,9 @@ h3 { padding: 0 0 2px 0; margin: 1em 0 1em 0; font-size: 1em; border-bottom: 1px
 
 acronym { background: #add; color: #222; padding: 2px 4px; border-radius: 2px; -moz-border-radius: 2px; border: none; cursor: default; }
 
+acronym.policy { background: none; border: 2px solid #add; padding: 0 2px; }
+table.list td acronym a { display: inline; width: auto; }
+
 acronym.warning { background: #daa; }
 
 .status-red acronym, .status-yellow acronym, .status-green acronym, .status-grey acronym, small acronym, acronym.normal { background: none; color: inherit; padding: 0; border-bottom: 1px dotted; cursor: default; }
index f95f528d7e3ab93cb5c7e8477aca11f58a0dd9cb..fd977f7ed310378d859a01552f9d303d2b9e039a 100644 (file)
       set an age and an increment for the samples you want. The end of
       the range returned will always correspond to the present.
     </p>
+
+    <p>
+      Different types of data take different query parameters to
+      return samples, as in the following table. You can specify more
+      than one set of parameters if the resource you are requesting
+      can generate more than one type of sample (for example, queues
+      can return message rates and queue lengths).
+    </p>
+
+    <table>
+      <tr>
+        <td>Messages sent and received</td>
+        <td><code>msg_rates_age</code> / <code>msg_rates_incr</code></td>
+      </tr>
+      <tr>
+        <td>Bytes sent and received</td>
+        <td><code>data_rates_age</code> / <code>data_rates_incr</code>
+        </td>
+      </tr>
+      <tr>
+        <td>Queue lengths</td>
+        <td><code>lengths_age</code> / <code>lengths_incr</code></td>
+      </tr>
+      <tr>
+        <td>Node statistics (e.g. file descriptors, disk space free)</td>
+        <td><code>node_stats_age</code> / <code>node_stats_incr</code></td>
+      </tr>
+    </table>
+
     <p>
-      Use <code>msg_rates_age</code>
-      and <code>msg_rates_incr</code> to return samples for messages
-      sent and received, <code>data_rates_age</code>
-      and <code>data_rates_incr</code> to return samples for bytes
-      sent and received, and <code>lengths_age</code>
-      and <code>lengths_incr</code> to return samples for queue
-      lengths. For example,
+      For example,
       appending <code>?lengths_age=3600&lengths_incr=60</code> will
       return the last hour's data on queue lengths, with a sample for
       every minute.
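To make the table of sampling parameters above concrete, the sketch below asks a queue for the last hour of length samples at one-minute resolution, reusing the ?lengths_age=3600&lengths_incr=60 example. The messages_details/samples shape it reads back mirrors what the management UI JavaScript later in this diff consumes; the listener, credentials and queue name are placeholders as before.

    import requests

    r = requests.get("http://localhost:15672/api/queues/%2f/my-queue",
                     params={"lengths_age": 3600, "lengths_incr": 60},
                     auth=("guest", "guest"))
    queue = r.json()
    for sample in queue["messages_details"]["samples"]:
        print(sample["timestamp"], sample["sample"])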
 
     <h2>Detailed message stats objects</h2>
     <p>
-      In addition, queues, exchanges and channels will return a
+      In addition, queues, exchanges and channels can return a
       breakdown of message stats for each of their neighbours
       (i.e. adjacent objects in the chain: channel -> exchange ->
-      queue -> channel).
+      queue -> channel). This will only happen if
+      the <code>rates_mode</code> configuration item has been switched
+      to <code>detailed</code> from its default of <code>basic</code>.
     </p>
     <p>
-      As this possibly constitutes a large quantity of data, it is
+      As this possibly constitutes a large quantity of data, it is also
       only returned when querying a single channel, queue or exchange
       rather than a list. Note also that the default sample retention
       policy means that these detailed message stats do not retain
           set_cluster_name</code>.
         </td>
       </tr>
+      <tr>
+        <td><code>contexts</code></td>
+        <td>
+          A list of web application contexts in the cluster.
+        </td>
+      </tr>
       <tr>
         <td><code>erlang_full_version</code></td>
         <td>
         </td>
       </tr>
       <tr>
-        <td><code>statistics_db_node</code></td>
+        <td><code>rates_mode</code></td>
         <td>
-          Name of the cluster node hosting the management statistics database.
+          'none', 'basic' or 'detailed'.
+        </td>
+      </tr>
+      <tr>
+        <td><code>statistics_db_event_queue</code></td>
+        <td>
+          Number of outstanding statistics events yet to be processed
+          by the database.
         </td>
       </tr>
       <tr>
-        <td><code>statistics_level</code></td>
+        <td><code>statistics_db_node</code></td>
         <td>
-          Whether the node is running fine or coarse statistics.
+          Name of the cluster node hosting the management statistics database.
         </td>
       </tr>
     </table>
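A quick, hedged way to inspect the overview fields added or renamed above (contexts, rates_mode, statistics_db_event_queue, statistics_db_node); the /api/overview path itself is listed above this excerpt, and the usual default listener and credentials are assumed.

    import requests

    overview = requests.get("http://localhost:15672/api/overview",
                            auth=("guest", "guest")).json()
    for key in ("rates_mode", "statistics_db_event_queue",
                "statistics_db_node", "contexts"):
        print(key, "=", overview.get(key))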
           List of all SASL authentication mechanisms installed on the node.
         </td>
       </tr>
+      <tr>
+        <td><code>cluster_links</code></td>
+        <td>
+          A list of the other nodes in the cluster. For each node,
+          there are details of the TCP connection used to connect to
+          it and statistics on data that has been transferred.
+        </td>
+      </tr>
+      <tr>
+        <td><code>config_files</code></td>
+        <td>
+          List of config files read by the node.
+        </td>
+      </tr>
       <tr>
         <td><code>contexts</code></td>
         <td>
           List of all HTTP listeners on the node.
         </td>
       </tr>
+      <tr>
+        <td><code>db_dir</code></td>
+        <td>
+          Location of the persistent storage used by the node.
+        </td>
+      </tr>
       <tr>
         <td><code>disk_free</code></td>
         <td>
           Point at which the disk alarm will go off.
         </td>
       </tr>
+      <tr>
+        <td><code>enabled_plugins</code></td>
+        <td>
+          List of plugins which are both explicitly enabled and running.
+        </td>
+      </tr>
       <tr>
         <td><code>exchange_types</code></td>
         <td>
           Used file descriptors.
         </td>
       </tr>
+      <tr>
+        <td><code>io_read_avg_time</code></td>
+        <td>
+          Average wall time (milliseconds) for each disk read operation in
+          the last statistics interval.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_read_bytes</code></td>
+        <td>
+          Total number of bytes read from disk by the persister.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_read_count</code></td>
+        <td>
+          Total number of read operations by the persister.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_reopen_count</code></td>
+        <td>
+          Total number of times the persister has needed to recycle
+          file handles between queues. In an ideal world this number
+          will be zero; if the number is large, performance might be
+          improved by increasing the number of file handles available
+          to RabbitMQ.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_seek_avg_time</code></td>
+        <td>
+          Average wall time (milliseconds) for each seek operation in
+          the last statistics interval.
+        </td>
+      </tr>
+      </tr>

+      <tr>
+        <td><code>io_seek_count</code></td>
+        <td>
+          Total number of seek operations by the persister.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_sync_avg_time</code></td>
+        <td>
+          Average wall time (milliseconds) for each fsync() operation in
+          the last statistics interval.
+        </td>
+      </tr>
+      </tr>

+      <tr>
+        <td><code>io_sync_count</code></td>
+        <td>
+          Total number of fsync() operations by the persister.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_write_avg_time</code></td>
+        <td>
+          Average wall time (milliseconds) for each disk write operation in
+          the last statistics interval.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_write_bytes</code></td>
+        <td>
+          Total number of bytes written to disk by the persister.
+        </td>
+      </tr>
+      <tr>
+        <td><code>io_write_count</code></td>
+        <td>
+          Total number of write operations by the persister.
+        </td>
+      </tr>
+      <tr>
+        <td><code>log_file</code></td>
+        <td>
+          Location of main log file.
+        </td>
+      </tr>
       <tr>
         <td><code>mem_used</code></td>
         <td>
           Point at which the memory alarm will go off.
         </td>
       </tr>
+      <tr>
+        <td><code>mnesia_disk_tx_count</code></td>
+        <td>
+          Number of Mnesia transactions which have been performed that
+          required writes to disk (e.g. creating a durable
+          queue). Only transactions which originated on this node are
+          included.
+        </td>
+      </tr>
+      <tr>
+        <td><code>mnesia_ram_tx_count</code></td>
+        <td>
+          Number of Mnesia transactions which have been performed that
+          did not require writes to disk (e.g. creating a transient
+          queue). Only transactions which originated on this node are
+          included.
+        </td>
+      </tr>
+      <tr>
+        <td><code>msg_store_read_count</code></td>
+        <td>
+          Number of messages which have been read from the message store.
+        </td>
+      </tr>
+      <tr>
+        <td><code>msg_store_write_count</code></td>
+        <td>
+          Number of messages which have been written to the message store.
+        </td>
+      </tr>
       <tr>
         <td><code>name</code></td>
         <td>
           Node name.
         </td>
       </tr>
+      <tr>
+        <td><code>net_ticktime</code></td>
+        <td>
+          Current kernel net_ticktime setting for the node.
+        </td>
+      </tr>
       <tr>
         <td><code>os_pid</code></td>
         <td>
           Number of cores detected and usable by Erlang.
         </td>
       </tr>
+      <tr>
+        <td><code>queue_index_journal_write_count</code></td>
+        <td>
+          Number of records written to the queue index journal. Each
+          record represents a message being published to a queue,
+          being delivered from a queue, and being acknowledged in a
+          queue.
+        </td>
+      </tr>
+      <tr>
+        <td><code>queue_index_read_count</code></td>
+        <td>
+          Number of records read from the queue index.
+        </td>
+      </tr>
+      <tr>
+        <td><code>queue_index_write_count</code></td>
+        <td>
+          Number of records written to the queue index.
+        </td>
+      </tr>
+      <tr>
+        <td><code>rates_mode</code></td>
+        <td>
+          'none', 'basic' or 'detailed'.
+        </td>
+      </tr>
       <tr>
         <td><code>run_queue</code></td>
         <td>
         </td>
       </tr>
       <tr>
-        <td><code>sockets_total</code></td>
+        <td><code>sasl_log_file</code></td>
         <td>
-          File descriptors available for use as sockets.
+          Location of <a href="http://www.erlang.org/doc/man/sasl_app.html">sasl</a> log file.
         </td>
       </tr>
       <tr>
-        <td><code>sockets_used</code></td>
+        <td><code>sockets_total</code></td>
         <td>
-          File descriptors used as sockets.
+          File descriptors available for use as sockets.
         </td>
       </tr>
       <tr>
-        <td><code>statistics_level</code></td>
+        <td><code>sockets_used</code></td>
         <td>
-          'fine' or 'coarse'.
+          File descriptors used as sockets.
         </td>
       </tr>
       <tr>
           if <code>?memory=true</code> is appended to the URL.
         </td>
       </tr>
+      <tr>
+        <td><code>binary</code></td>
+        <td>
+          Detailed breakdown of the owners of binary memory. Only
+          appears if <code>?binary=true</code> is appended to the
+          URL. Note that this can be an expensive query if there are
+          many small binaries in the system.
+        </td>
+      </tr>
     </table>
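The memory and binary breakdowns described above are opt-in via query parameters. A sketch, assuming the individual-node path /api/nodes/&lt;name&gt; (which appears above this excerpt), a node named rabbit@localhost, and the same default credentials and requests library:

    import requests

    r = requests.get("http://localhost:15672/api/nodes/rabbit@localhost",
                     params={"memory": "true", "binary": "true"},
                     auth=("guest", "guest"))
    node = r.json()
    # A few of the persister counters documented above.
    print(node.get("io_read_count"), node.get("io_write_count"),
          node.get("io_reopen_count"))
    # Detailed breakdowns, present only because of the query parameters.
    print(node.get("memory"))
    print(node.get("binary"))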
 
     <h2>/api/connections</h2>
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/img/bg-binary.png b/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/img/bg-binary.png
new file mode 100644 (file)
index 0000000..dc136bf
Binary files /dev/null and b/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/img/bg-binary.png differ
index ed28295804df6e31d72a502477453daf218cae93..0ec370fb3219881ab2cb2774c9144e441019136e 100644 (file)
+//
+// Formatting side
+//
+
+function message_rates(id, stats) {
+    var items = [['Publish', 'publish'], ['Confirm', 'confirm'],
+                 ['Publish (In)', 'publish_in'],
+                 ['Publish (Out)', 'publish_out'],
+                 ['Deliver', 'deliver'],
+                 ['Redelivered', 'redeliver'],
+                 ['Acknowledge', 'ack'],
+                 ['Get', 'get'], ['Deliver (noack)', 'deliver_no_ack'],
+                 ['Get (noack)', 'get_no_ack'],
+                 ['Return', 'return_unroutable'],
+                 ['Disk read', 'disk_reads'],
+                 ['Disk write', 'disk_writes']];
+    return rates_chart_or_text(id, stats, items, fmt_rate, fmt_rate_axis, true, 'Message rates', 'message-rates');
+}
+
+function queue_lengths(id, stats) {
+    var items = [['Ready', 'messages_ready'],
+                 ['Unacked', 'messages_unacknowledged'],
+                 ['Total', 'messages']];
+    return rates_chart_or_text(id, stats, items, fmt_num_thousands, fmt_plain_axis, false, 'Queued messages', 'queued-messages');
+}
+
+function data_rates(id, stats) {
+    var items = [['From client', 'recv_oct'], ['To client', 'send_oct']];
+    return rates_chart_or_text(id, stats, items, fmt_rate_bytes, fmt_rate_bytes_axis, true, 'Data rates');
+}
+
+function rates_chart_or_text(id, stats, items, fmt, axis_fmt, chart_rates,
+                             heading, heading_help) {
+    var prefix = chart_h3(id, heading, heading_help);
+
+    return prefix + rates_chart_or_text_no_heading(
+            id, id, stats, items, fmt, axis_fmt, chart_rates);
+}
+
+function rates_chart_or_text_no_heading(type_id, id, stats, items,
+                                        fmt, axis_fmt, chart_rates) {
+    var mode = get_pref('rate-mode-' + type_id);
+    var range = get_pref('chart-range');
+    var res;
+    if (keys(stats).length > 0) {
+        if (mode == 'chart') {
+            res = rates_chart(
+                type_id, id, items, stats, fmt, axis_fmt, 'full', chart_rates);
+        }
+        else {
+            res = rates_text(items, stats, mode, fmt, chart_rates);
+        }
+        if (res == "") res = '<p>Waiting for data...</p>';
+    }
+    else {
+        res = '<p>Currently idle</p>';
+    }
+    return res;
+}
+
+function chart_h3(id, heading, heading_help) {
+    var mode = get_pref('rate-mode-' + id);
+    var range = get_pref('chart-range');
+    return '<h3>' + heading +
+        ' <span class="popup-options-link" title="Click to change" ' +
+        'type="rate" for="' + id + '">(' + prefix_title(mode, range) +
+        ')</span>' + (heading_help == undefined ? '' :
+         ' <span class="help" id="' + heading_help + '"></span>') +
+        '</h3>';
+}
+
+function prefix_title(mode, range) {
+    var desc = CHART_PERIODS[range];
+    if (mode == 'chart') {
+        return 'chart: ' + desc.toLowerCase();
+    }
+    else if (mode == 'curr') {
+        return 'current value';
+    }
+    else {
+        return 'moving average: ' + desc.toLowerCase();
+    }
+}
+
+function node_stat_count(used_key, limit_key, stats, thresholds) {
+    var used = stats[used_key];
+    var limit = stats[limit_key];
+    if (typeof used == 'number') {
+        return node_stat(used_key, 'Used', limit_key, 'available', stats,
+                         fmt_plain, fmt_plain_axis,
+                         fmt_color(used / limit, thresholds));
+    } else {
+        return used;
+    }
+}
+
+function node_stat_count_bar(used_key, limit_key, stats, thresholds) {
+    var used = stats[used_key];
+    var limit = stats[limit_key];
+    if (typeof used == 'number') {
+        return node_stat_bar(used_key, limit_key, 'available', stats,
+                             fmt_plain_axis,
+                             fmt_color(used / limit, thresholds));
+    } else {
+        return used;
+    }
+}
+
+function node_stat(used_key, used_name, limit_key, suffix, stats, fmt,
+                   axis_fmt, colour, help, invert) {
+    if (get_pref('rate-mode-node-stats') == 'chart') {
+        var items = [[used_name, used_key], ['Limit', limit_key]];
+        add_fake_limit_details(used_key, limit_key, stats);
+        return rates_chart('node-stats', 'node-stats-' + used_key, items, stats,
+                           fmt, axis_fmt, 'node', false);
+    } else {
+        return node_stat_bar(used_key, limit_key, suffix, stats, axis_fmt,
+                             colour, help, invert);
+    }
+}
+
+function add_fake_limit_details(used_key, limit_key, stats) {
+    var source = stats[used_key + '_details'].samples;
+    var limit = stats[limit_key];
+    var dest = [];
+    for (var i in source) {
+        dest[i] = {sample: limit, timestamp: source[i].timestamp};
+    }
+    stats[limit_key + '_details'] = {samples: dest};
+}
+
+function node_stat_bar(used_key, limit_key, suffix, stats, fmt, colour,
+                       help, invert) {
+    var used = stats[used_key];
+    var limit = stats[limit_key];
+    var width = 120;
+
+    var res = '';
+    var other_colour = colour;
+    var ratio = invert ? (limit / used) : (used / limit);
+    if (ratio > 1) {
+        ratio = 1 / ratio;
+        inverted = true;
+        colour += '-dark';
+    }
+    else {
+        other_colour += '-dark';
+    }
+    var offset = Math.round(width * (1 - ratio));
+
+    res += '<div class="status-bar" style="width: ' + width + 'px;">';
+    res += '<div class="status-bar-main ' + colour + '" style="background-image: url(img/bg-' + other_colour + '.png); background-position: -' + offset + 'px 0px; background-repeat: no-repeat;">';
+    res += fmt(used);
+    if (help != null) {
+        res += ' <span class="help" id="' + help + '"></span>';
+    }
+    res += '</div>'; // status-bar-main
+    res += '<sub>' + fmt(limit) + ' ' + suffix + '</sub>';
+    res += '</div>'; // status-bar
+
+    return res;
+}
+
+function node_stats_prefs() {
+    return chart_h3('node-stats', 'Node statistics');
+}
+
+function rates_chart(type_id, id, items, stats, fmt, axis_fmt, type,
+                     chart_rates) {
+    function show(key) {
+        return get_pref('chart-line-' + id + key) === 'true';
+    }
+
+    var size = get_pref('chart-size-' + type_id);
+    var legend = [];
+    chart_data[id] = {};
+    chart_data[id]['data'] = {};
+    chart_data[id]['fmt'] = axis_fmt;
+    var ix = 0;
+    for (var i in items) {
+        var name = items[i][0];
+        var key = items[i][1];
+        var key_details = key + '_details';
+        if (key_details in stats) {
+            if (show(key)) {
+                chart_data[id]['data'][name] = stats[key_details];
+                chart_data[id]['data'][name].ix = ix;
+            }
+            var value = chart_rates ? pick_rate(fmt, stats, key) :
+                                      pick_abs(fmt, stats, key);
+            legend.push({name:  name,
+                         key:   key,
+                         value: value,
+                         show:  show(key)});
+            ix++;
+        }
+    }
+    var html = '<div class="box"><div id="chart-' + id +
+        '" class="chart chart-' + type + ' chart-' + size +
+        (chart_rates ? ' chart-rates' : '') + '"></div>';
+    html += '<table class="legend">';
+    for (var i = 0; i < legend.length; i++) {
+        if (i % 3 == 0 && i < legend.length - 1) {
+            html += '</table><table class="legend">';
+        }
+
+        html += '<tr><th><span title="Click to toggle line" ';
+        html += 'class="rate-visibility-option';
+        html += legend[i].show ? '' : ' rate-visibility-option-hidden';
+        html += '" data-pref="chart-line-' + id + legend[i].key + '">';
+        html += legend[i].name + '</span></th><td>';
+        html += '<div class="colour-key" style="background: ' + chart_colors[type][i];
+        html += ';"></div>' + legend[i].value + '</td></tr>'
+    }
+    html += '</table></div>';
+    return legend.length > 0 ? html : '';
+}
+
+function rates_text(items, stats, mode, fmt, chart_rates) {
+    var res = '';
+    for (var i in items) {
+        var name = items[i][0];
+        var key = items[i][1];
+        var key_details = key + '_details';
+        if (key_details in stats) {
+            var details = stats[key_details];
+            res += '<div class="highlight">' + name + '<strong>';
+            res += chart_rates ? pick_rate(fmt, stats, key, mode) :
+                                 pick_abs(fmt, stats, key, mode);
+            res += '</strong></div>';
+        }
+    }
+    return res == '' ? '' : '<div class="box">' + res + '</div>';
+}
+
+//
+// Rendering side
+//
+
 function render_charts() {
     $('.chart').map(function() {
         render_chart($(this));
     });
 }
 
-var chart_colors = ['#edc240', '#afd8f8', '#cb4b4b', '#4da74d', '#9440ed', '#666666', '#aaaaaa'];
+var chart_colors = {full: ['#edc240', '#afd8f8', '#cb4b4b', '#4da74d', '#9440ed', '#666666', '#aaaaaa'],
+                    node: ['#6ae26a', '#e24545']};
 
 var chart_chrome = {
     series: { lines: { show: true } },
@@ -14,17 +254,24 @@ var chart_chrome = {
     legend: { show: false }
 };
 
+function chart_fill(mode, i) {
+    return mode =='node' && i == 0;
+}
+
 function render_chart(div) {
     var id = div.attr('id').substring('chart-'.length);
     var rate_mode = div.hasClass('chart-rates');
-
     var out_data = [];
-    var i = 0;
     var data = chart_data[id]['data'];
     var fmt = chart_data[id]['fmt'];
+
+    var mode = div.hasClass('chart-full') ? 'full': 'node';
+    var colors = chart_colors[mode];
+
     for (var name in data) {
         var series = data[name];
         var samples = series.samples;
+        var i = series.ix;
         var d = [];
         for (var j = 1; j < samples.length; j++) {
             var x = samples[j].timestamp;
@@ -43,8 +290,8 @@ function render_chart(div) {
             }
             d.push([x, y]);
         }
-        out_data.push({data: d, color: chart_colors[i], shadowSize: 0});
-        i++;
+        out_data.push({data: d, color: colors[i], shadowSize: 0,
+                       lines: {show: true, fill: chart_fill(mode, i)}});
     }
     chart_data[id] = {};
 
@@ -67,6 +314,6 @@ function update_rate_options(sammy) {
     var id = sammy.params['id'];
     store_pref('rate-mode-' + id, sammy.params['mode']);
     store_pref('chart-size-' + id, sammy.params['size']);
-    store_pref('chart-range-' + id, sammy.params['range']);
+    store_pref('chart-range', sammy.params['range']);
     partial_update();
 }
index db4788105b7d7913db4dea0232ed2b8dd05b2b89..4c2d6700102aca3a0515606e2392fcb5c191fb14 100644 (file)
@@ -26,9 +26,10 @@ dispatcher_add(function(sammy) {
 
     sammy.get('#/nodes/:name', function() {
             var name = esc(this.params['name']);
-            render({'node': '/nodes/' + name},
+            render({'node': {path:    '/nodes/' + name,
+                             options: {ranges: ['node-stats']}}},
                    'node', '');
-        });
+            });
 
     path('#/connections',
          {'connections': {path: '/connections', options: {sort:true}}},
@@ -214,7 +215,7 @@ dispatcher_add(function(sammy) {
         });
 
     sammy.put('#/logout', function() {
-            document.cookie = 'auth=; expires=Thu, 01 Jan 1970 00:00:00 GMT';
+            clear_pref('auth');
             location.reload();
         });
 
@@ -224,4 +225,7 @@ dispatcher_add(function(sammy) {
     sammy.put('#/rate-options', function() {
             update_rate_options(this);
         });
+    sammy.put('#/column-options', function() {
+            update_column_options(this);
+        });
 });
index e4e3923f5d7d52cb4a4763a0aa21231dc3ee3fac..8f7f8138af4da4c94d4927c75d6396c3e01aa095 100644 (file)
@@ -12,11 +12,6 @@ function fmt_string(str, unknown) {
     return fmt_escape_html("" + str);
 }
 
-function fmt_bytes(bytes) {
-    if (bytes == undefined) return UNKNOWN_REPR;
-    return fmt_si_prefix(bytes, bytes, 1024, false) + 'B';
-}
-
 function fmt_si_prefix(num0, max0, thousand, allow_fractions) {
     if (num == 0) return 0;
 
@@ -34,25 +29,39 @@ function fmt_si_prefix(num0, max0, thousand, allow_fractions) {
             num.toFixed(0)) + powers[power];
 }
 
-function fmt_memory(memory, key) {
-    return '<div class="colour-key memory_' + key + '"></div>' +
-        fmt_bytes(memory[key]);
-}
-
-function fmt_boolean(b) {
-    if (b == undefined) return UNKNOWN_REPR;
+function fmt_boolean(b, unknown) {
+    if (unknown == undefined) unknown = UNKNOWN_REPR;
+    if (b == undefined) return unknown;
 
     return b ? "&#9679;" : "&#9675;";
 }
 
 function fmt_date(d) {
+    var res = fmt_date0(d);
+    return res[0] + ' ' + res[1];
+}
+
+function fmt_date_mini(d) {
+    var res = fmt_date0(d);
+    return res[1] + '<sub>' + res[0] + '</sub>';
+}
+
+function fmt_date0(d) {
     function f(i) {
         return i < 10 ? "0" + i : i;
     }
 
-    return d.getFullYear() + "-" + f(d.getMonth() + 1) + "-" +
-        f(d.getDate()) + " " + f(d.getHours()) + ":" + f(d.getMinutes()) +
-        ":" + f(d.getSeconds());
+    return [d.getFullYear() + "-" + f(d.getMonth() + 1) + "-" +
+            f(d.getDate()), f(d.getHours()) + ":" + f(d.getMinutes()) +
+        ":" + f(d.getSeconds())];
+}
+
+function fmt_timestamp(ts) {
+    return fmt_date(new Date(ts));
+}
+
+function fmt_timestamp_mini(ts) {
+    return fmt_date_mini(new Date(ts));
 }
 
 function fmt_time(t, suffix) {
@@ -64,24 +73,39 @@ function fmt_millis(millis) {
     return Math.round(millis / 1000) + "s";
 }
 
-function fmt_parameters(obj) {
-    return fmt_table_short(args_to_params(obj));
+function fmt_features(obj) {
+    return fmt_table_short(args_to_features(obj));
 }
 
-function fmt_parameters_short(obj) {
+function fmt_policy_short(obj) {
+    if (obj.policy != undefined && obj.policy != '') {
+        return '<acronym class="policy" title="Policy: ' +
+            fmt_escape_html(obj.policy) + '">' +
+            fmt_escape_html(obj.policy) + '</acronym> ';
+    } else {
+        return '';
+    }
+}
+
+function fmt_features_short(obj) {
     var res = '';
-    var params = args_to_params(obj);
+    var features = args_to_features(obj);
+
+    if (obj.owner_pid_details != undefined) {
+        res += '<acronym title="Exclusive queue: click for owning connection">'
+            + link_conn(obj.owner_pid_details.name, "Excl") + '</acronym> ';
+    }
 
     for (var k in ALL_ARGS) {
-        if (params[k] != undefined) {
-            res += '<acronym title="' + k + ': ' + fmt_string(params[k]) +
+        if (features[k] != undefined) {
+            res += '<acronym title="' + k + ': ' + fmt_string(features[k]) +
                 '">' + ALL_ARGS[k].short + '</acronym> ';
         }
     }
 
-    if (params.arguments) {
-        res += '<acronym title="' + fmt_table_flat(params.arguments) +
-        '">Args</acronym>';
+    if (features.arguments) {
+        res += '<acronym title="' + fmt_table_flat(features.arguments) +
+        '">Args</acronym> ';
     }
     return res;
 }
@@ -98,7 +122,7 @@ function short_chan(name) {
     return (match != null && match.length == 3) ? match[1] + match[2] : name;
 }
 
-function args_to_params(obj) {
+function args_to_features(obj) {
     var res = {};
     for (var k in obj.arguments) {
         if (k in KNOWN_ARGS) {
@@ -177,14 +201,6 @@ function fmt_color(r, thresholds) {
     return 'green';
 }
 
-function fmt_deliver_rate(obj, show_redeliver) {
-    var res = fmt_rate(obj, 'deliver_get');
-    if (show_redeliver) {
-        res += '<sub>' + fmt_rate(obj, 'redeliver') + '</sub>';
-    }
-    return res;
-}
-
 function fmt_rate_num(num) {
     if (num == undefined) return UNKNOWN_REPR;
     else if (num < 1)     return num.toFixed(2);
@@ -207,87 +223,76 @@ function fmt_percent(num) {
     }
 }
 
-function fmt_rate(obj, name, mode) {
-    var raw = fmt_rate0(obj, name, mode, fmt_rate_num);
-    return raw == '' ? '' : (raw + '/s');
+function pick_rate(fmt, obj, name, mode) {
+    if (obj == undefined || obj[name] == undefined ||
+        obj[name + '_details'] == undefined) return '';
+    var details = obj[name + '_details'];
+    return fmt(mode == 'avg' ? details.avg_rate : details.rate);
 }
 
-function fmt_rate_bytes(obj, name, mode) {
-    var raw = fmt_rate0(obj, name, mode, fmt_bytes);
-    return raw == '' ? '' : (raw + '/s' +
-                             '<sub>(' + fmt_bytes(obj[name]) + ' total)</sub>');
+function pick_abs(fmt, obj, name, mode) {
+    if (obj == undefined || obj[name] == undefined ||
+        obj[name + '_details'] == undefined) return '';
+    var details = obj[name + '_details'];
+    return fmt(mode == 'avg' ? details.avg : obj[name]);
 }
 
-function fmt_rate_large(obj, name, mode) {
-    return '<strong>' + fmt_rate0(obj, name, mode, fmt_rate_num) +
-        '</strong>msg/s';
+function fmt_detail_rate(obj, name, mode) {
+    return pick_rate(fmt_rate, obj, name, mode);
 }
 
-function fmt_rate_bytes_large(obj, name, mode) {
-    return '<strong>' + fmt_rate0(obj, name, mode, fmt_bytes) + '/s</strong>' +
-        '(' + fmt_bytes(obj[name]) + ' total)';
+function fmt_detail_rate_bytes(obj, name, mode) {
+    return pick_rate(fmt_rate_bytes, obj, name, mode);
 }
 
-function fmt_rate0(obj, name, mode, fmt) {
-    if (obj == undefined || obj[name] == undefined ||
-        obj[name + '_details'] == undefined) return '';
-    var details = obj[name + '_details'];
-    return fmt(mode == 'avg' ? details.avg_rate : details.rate);
+// ---------------------------------------------------------------------
+
+// These are pluggable for charts etc
+
+function fmt_plain(num) {
+    return num;
 }
 
-function fmt_msgs(obj, name, mode) {
-    return fmt_msgs0(obj, name, mode) + ' msg';
+function fmt_plain_axis(num, max) {
+    return fmt_si_prefix(num, max, 1000, true);
 }
 
-function fmt_msgs_large(obj, name, mode) {
-    return '<strong>' + fmt_msgs0(obj, name, mode) + '</strong>' +
-        fmt_rate0(obj, name, mode, fmt_msgs_rate);
+function fmt_rate(num) {
+    return fmt_rate_num(num) + '/s';
 }
 
-function fmt_msgs0(obj, name, mode) {
-    if (obj == undefined || obj[name] == undefined ||
-        obj[name + '_details'] == undefined) return '';
-    var details = obj[name + '_details'];
-    return mode == 'avg' ? fmt_rate_num(details.avg) :
-        fmt_num_thousands(obj[name]);
+function fmt_rate_axis(num, max) {
+    return fmt_plain_axis(num, max) + '/s';
 }
 
-function fmt_msgs_rate(num) {
-    if (num > 0)      return '+' + fmt_rate_num(num)  + ' msg/s';
-    else if (num < 0) return '-' + fmt_rate_num(-num) + ' msg/s';
-    else              return '&nbsp;';
+function fmt_bytes(bytes) {
+    if (bytes == undefined) return UNKNOWN_REPR;
+    return fmt_si_prefix(bytes, bytes, 1024, false) + 'B';
 }
 
-function fmt_rate_axis(num, max) {
-    return fmt_si_prefix(num, max, 1000, true) + '/s';
+function fmt_bytes_axis(num, max) {
+    num = parseInt(num);
+    return fmt_bytes(isNaN(num) ? 0 : num);
 }
 
-function fmt_msgs_axis(num, max) {
-    return fmt_si_prefix(num, max, 1000, true);
+function fmt_rate_bytes(num) {
+    return fmt_bytes(num) + '/s';
 }
 
 function fmt_rate_bytes_axis(num, max) {
-    num = parseInt(num);
-    return fmt_bytes(isNaN(num) ? 0 : num) + '/s';
+    return fmt_bytes_axis(num, max) + '/s';
 }
 
-function is_stat_empty(obj, name) {
-    if (obj == undefined
-        || obj[name] == undefined
-        || obj[name + '_details'] == undefined
-        || obj[name + '_details'].rate < 0.00001) return true;
-    return false;
+function fmt_ms(num) {
+    return fmt_rate_num(num) + 'ms';
 }
 
-function is_col_empty(objects, name, accessor) {
-    if (accessor == undefined) accessor = function(o) {return o.message_stats;};
-    for (var i = 0; i < objects.length; i++) {
-        var object = objects[i];
-        if (!is_stat_empty(accessor(object), name)) {
-            return false;
-        }
-    }
-    return true;
+// ---------------------------------------------------------------------
+
+function fmt_maybe_vhost(name) {
+    return vhosts_interesting ?
+        ' in virtual host <b>' + fmt_escape_html(name) + '</b>'
+        : '';
 }
 
 function fmt_exchange(name) {
@@ -319,15 +324,6 @@ function fmt_download_filename(host) {
         (now.getMonth() + 1) + "-" + now.getDate() + ".json";
 }
 
-function fmt_fd_used(used, total) {
-    if (used == 'install_handle_from_sysinternals') {
-        return '<p class="c">handle.exe missing <span class="help" id="handle-exe"></span><sub>' + total + ' available</sub></p>';
-    }
-    else {
-        return used;
-    }
-}
-
 function fmt_table_short(table) {
     return '<table class="mini">' + fmt_table_body(table, ':') + '</table>';
 }
@@ -340,8 +336,8 @@ function fmt_table_long(table) {
 function fmt_table_body(table, x) {
     var res = '';
     for (k in table) {
-        res += '<tr><th>' + k + x + '</th><td>' + fmt_amqp_value(table[k]) +
-            '</td>';
+        res += '<tr><th>' + fmt_escape_html(k) + x + '</th>' +
+            '<td>' + fmt_amqp_value(table[k]) + '</td>';
     }
     return res;
 }
@@ -369,7 +365,7 @@ function fmt_amqp_value(val) {
 function fmt_table_flat(table) {
     var res = [];
     for (k in table) {
-        res.push(k + ': ' + fmt_amqp_value_flat(table[k]));
+        res.push(fmt_escape_html(k) + ': ' + fmt_amqp_value_flat(table[k]));
     }
     return res.join(', ');
 }
@@ -405,6 +401,30 @@ function fmt_uptime(u) {
         return min + 'm ' + sec + 's';
 }
 
+function fmt_plugins_small(node) {
+    if (node.applications === undefined) return '';
+    var plugins = [];
+    for (var i = 0; i < node.applications.length; i++) {
+        var application = node.applications[i];
+        if (jQuery.inArray(application.name, node.enabled_plugins) != -1 ) {
+            plugins.push(application.name);
+        }
+    }
+    return '<acronym title="Plugins: ' + plugins.join(", ") + '">' +
+        plugins.length + '</acronym>';
+}
+
+function get_plugins_list(node) {
+    var result = [];
+    for (var i = 0; i < node.applications.length; i++) {
+        var application = node.applications[i];
+        if (jQuery.inArray(application.name, node.enabled_plugins) != -1 ) {
+            result.push(application);
+        }
+    }
+    return result;
+}
+
 function fmt_rabbit_version(applications) {
     for (var i in applications) {
         if (applications[i].name == 'rabbit') {
@@ -453,7 +473,7 @@ function fmt_node(node_host) {
     var both = node_host.split('@');
     var node = both.slice(0, 1);
     var host = both.slice(1);
-    return '<small>' + node + '@</small>' + host;
+    return node == 'rabbit' ? host : (node + '@' + host);
 }
 
 function fmt_object_state(obj) {
@@ -481,6 +501,16 @@ function fmt_object_state(obj) {
         colour = 'yellow';
         explanation = 'Publishing rate recently restricted by server.';
     }
+    else if (obj.state == 'down') {
+        colour = 'red';
+        explanation = 'The queue is located on a cluster node or nodes that ' +
+            'are down.';
+    }
+    else if (obj.state == 'crashed') {
+        colour = 'red';
+        explanation = 'The queue has crashed repeatedly and been unable to ' +
+            'restart.';
+    }
 
     return fmt_state(colour, text, explanation);
 }
@@ -498,44 +528,6 @@ function fmt_state(colour, text, explanation) {
     return '<div class="colour-key status-key-' + colour + '"></div>' + key;
 }
 
-function fmt_resource_bar(used_label, limit_label, ratio, colour, help) {
-    var width = 120;
-
-    var res = '';
-    var other_colour = colour;
-    if (ratio > 1) {
-        ratio = 1 / ratio;
-        inverted = true;
-        colour += '-dark';
-    }
-    else {
-        other_colour += '-dark';
-    }
-    var offset = Math.round(width * (1 - ratio));
-
-    res += '<div class="status-bar" style="width: ' + width + 'px;">';
-    res += '<div class="status-bar-main ' + colour + '" style="background-image: url(img/bg-' + other_colour + '.png); background-position: -' + offset + 'px 0px; background-repeat: no-repeat;">';
-    res += used_label;
-    if (help != null) {
-        res += ' <span class="help" id="' + help + '"></span>';
-    }
-    res += '</div>'; // status-bar-main
-    if (limit_label != null) {
-        res += '<sub>' + limit_label + '</sub>';
-    }
-    res += '</div>'; // status-bar
-    return res;
-}
-
-function fmt_resource_bar_count(used, total, thresholds) {
-    if (typeof used == 'number') {
-        return fmt_resource_bar(used, total + ' available', used / total,
-                                fmt_color(used / total, thresholds));
-    } else {
-        return used;
-    }
-}
-
 function fmt_shortened_uri(uri) {
     if (typeof uri == 'object') {
         var res = '';
@@ -572,8 +564,9 @@ function fmt_client_name(properties) {
 
 function fmt_trunc(str, max_length) {
     return str.length > max_length ?
-        ('<acronym class="normal" title="' + str + '">' +
-         str.substring(0, max_length) + '...</acronym>') : str;
+        ('<acronym class="normal" title="' + fmt_escape_html(str) + '">' +
+         fmt_escape_html(str.substring(0, max_length)) + '...</acronym>') :
+        fmt_escape_html(str);
 }
 
 function alt_rows(i, args) {
@@ -662,115 +655,6 @@ function fmt_highlight_filter(text) {
     }
 }
 
-function message_rates(id, stats) {
-    var items = [['Publish', 'publish'], ['Confirm', 'confirm'],
-                 ['Publish (In)', 'publish_in'],
-                 ['Publish (Out)', 'publish_out'],
-                 ['Deliver', 'deliver'],
-                 ['Redelivered', 'redeliver'],
-                 ['Acknowledge', 'ack'],
-                 ['Get', 'get'], ['Deliver (noack)', 'deliver_no_ack'],
-                 ['Get (noack)', 'get_no_ack'],
-                 ['Return', 'return_unroutable']];
-    return rates_chart_or_text(id, stats, items, fmt_rate, fmt_rate_large, fmt_rate_axis, true, 'Message rates', 'message-rates');
-}
-
-function queue_lengths(id, stats) {
-    var items = [['Ready', 'messages_ready'],
-                 ['Unacknowledged', 'messages_unacknowledged'],
-                 ['Total', 'messages']];
-    return rates_chart_or_text(id, stats, items, fmt_msgs, fmt_msgs_large, fmt_msgs_axis, false, 'Queued messages', 'queued-messages');
-}
-
-function data_rates(id, stats) {
-    var items = [['From client', 'recv_oct'], ['To client', 'send_oct']];
-    return rates_chart_or_text(id, stats, items, fmt_rate_bytes, fmt_rate_bytes_large, fmt_rate_bytes_axis, true, 'Data rates');
-}
-
-function rates_chart_or_text(id, stats, items, chart_fmt, text_fmt, axis_fmt, chart_rates,
-                             heading, heading_help) {
-    var mode = get_pref('rate-mode-' + id);
-    var range = get_pref('chart-range-' + id);
-    var prefix = '<h3>' + heading +
-        ' <span class="rate-options updatable" title="Click to change" for="'
-        + id + '">(' + prefix_title(mode, range) + ')</span>' +
-        (heading_help == undefined ? '' :
-         ' <span class="help" id="' + heading_help + '"></span>') +
-        '</h3>';
-    var res;
-
-    if (keys(stats).length > 0) {
-        if (mode == 'chart') {
-            res = rates_chart(id, items, stats, chart_fmt, axis_fmt, chart_rates);
-        }
-        else {
-            res = rates_text(items, stats, mode, text_fmt);
-        }
-        if (res == "") res = '<p>Waiting for data...</p>';
-    }
-    else {
-        res = '<p>Currently idle</p>';
-    }
-    return prefix + '<div class="updatable">' + res + '</div>';
-}
-
-function prefix_title(mode, range) {
-    var desc = CHART_PERIODS[range];
-    if (mode == 'chart') {
-        return 'chart: ' + desc.toLowerCase();
-    }
-    else if (mode == 'curr') {
-        return 'current value';
-    }
-    else {
-        return 'moving average: ' + desc.toLowerCase();
-    }
-}
-
-function rates_chart(id, items, stats, rate_fmt, axis_fmt, chart_rates) {
-    var size = get_pref('chart-size-' + id);
-    var show = [];
-    chart_data[id] = {};
-    chart_data[id]['data'] = {};
-    chart_data[id]['fmt'] = axis_fmt;
-    for (var i in items) {
-        var name = items[i][0];
-        var key = items[i][1];
-        var key_details = key + '_details';
-        if (key_details in stats) {
-            chart_data[id]['data'][name] = stats[key_details];
-            show.push([name, rate_fmt(stats, key)]);
-        }
-    }
-    var html = '<div class="box"><div id="chart-' + id +
-        '" class="chart chart-' + size +
-        (chart_rates ? ' chart-rates' : '') + '"></div>';
-    html += '<table class="facts facts-fixed-width">';
-    for (var i = 0; i < show.length; i++) {
-        html += '<tr><th>' + show[i][0] + '</th><td>';
-        html += '<div class="colour-key" style="background: ' + chart_colors[i];
-        html += ';"></div>' + show[i][1] + '</td></tr>'
-    }
-    html += '</table></div>';
-    return show.length > 0 ? html : '';
-}
-
-function rates_text(items, stats, mode, rate_fmt) {
-    var res = '';
-    for (var i in items) {
-        var name = items[i][0];
-        var key = items[i][1];
-        var key_details = key + '_details';
-        if (key_details in stats) {
-            var details = stats[key_details];
-            res += '<div class="highlight">' + name;
-            res += rate_fmt(stats, key, mode);
-            res += '</div>';
-        }
-    }
-    return res == '' ? '' : '<div class="box">' + res + '</div>';
-}
-
 function filter_ui(items) {
     current_truncate = (current_truncate == null) ?
         parseInt(get_pref('truncate')) : current_truncate;
@@ -850,12 +734,36 @@ function fmt_sort(display, sort) {
     return '<a class="sort" sort="' + sort + '">' + prefix + display + '</a>';
 }
 
+function group_count(mode, group, bools) {
+    var count = 0;
+    for (var i = 0; i < bools.length; i++) {
+        if (bools[i]) count++;
+    }
+
+    var options = COLUMNS[mode][group];
+    for (var i = 0; i < options.length; i++) {
+        var column = options[i][0];
+        if (show_column(mode, column)) count++;
+    }
+    return count;
+}
+
+function group_heading(mode, group, bools) {
+    var count = group_count(mode, group, bools);
+    if (count == 0) {
+        return '';
+    }
+    else {
+        return '<th colspan="' + count + '">' + group + '</th>';
+    }
+}
+
 function fmt_permissions(obj, permissions, lookup, show, warning) {
     var res = [];
     for (var i in permissions) {
         var permission = permissions[i];
         if (permission[lookup] == obj.name) {
-            res.push(permission[show]);
+            res.push(fmt_escape_html(permission[show]));
         }
     }
     return res.length == 0 ? warning : res.join(', ');
@@ -866,12 +774,18 @@ var radio_id = 0;
 function fmt_radio(name, text, value, current) {
     radio_id++;
     return '<label class="radio" for="radio-' + radio_id + '">' +
-        '<input type="radio" id="radio-' + radio_id + '" name="' + name + 
+        '<input type="radio" id="radio-' + radio_id + '" name="' + name +
         '" value="' + value + '"' +
         ((value == current) ? ' checked="checked"' : '') +
         '>' + text + '</label>';
 }
 
+function fmt_checkbox(name, text, current) {
+    return '<label class="checkbox" for="checkbox-' + name + '">' +
+        '<input type="checkbox" id="checkbox-' + name + '" name="' + name +
+        '"' + (current ? ' checked="checked"' : '') + '>' + text + '</label>';
+}
+
 function properties_size(obj) {
     var count = 0;
     for (k in obj) {
index 0111aedf0f4149e45df1fba74810c9a6b3e503ee..f2de0d98d6fd5c43fa975205b80c7b38d96775b1 100644 (file)
@@ -18,8 +18,10 @@ var KNOWN_ARGS = {'alternate-exchange':        {'short': 'AE',  'type': 'string'
                   'x-message-ttl':             {'short': 'TTL', 'type': 'int'},
                   'x-expires':                 {'short': 'Exp', 'type': 'int'},
                   'x-max-length':              {'short': 'Lim', 'type': 'int'},
+                  'x-max-length-bytes':        {'short': 'Lim B', 'type': 'int'},
                   'x-dead-letter-exchange':    {'short': 'DLX', 'type': 'string'},
-                  'x-dead-letter-routing-key': {'short': 'DLK', 'type': 'string'}};
+                  'x-dead-letter-routing-key': {'short': 'DLK', 'type': 'string'},
+                  'x-max-priority':            {'short': 'Pri', 'type': 'int'}};
 
 // Things that are like arguments that we format the same way in listings.
 var IMPLICIT_ARGS = {'durable':         {'short': 'D',   'type': 'boolean'},
@@ -28,8 +30,8 @@ var IMPLICIT_ARGS = {'durable':         {'short': 'D',   'type': 'boolean'},
 
 // Both the above
 var ALL_ARGS = {};
-for (var k in KNOWN_ARGS)    ALL_ARGS[k] = KNOWN_ARGS[k];
 for (var k in IMPLICIT_ARGS) ALL_ARGS[k] = IMPLICIT_ARGS[k];
+for (var k in KNOWN_ARGS)    ALL_ARGS[k] = KNOWN_ARGS[k];
 
 var NAVIGATION = {'Overview':    ['#/',            "management"],
                   'Connections': ['#/connections', "management"],
@@ -49,6 +51,84 @@ var CHART_PERIODS = {'60|5':       'Last minute',
                      '28800|600':  'Last eight hours',
                      '86400|1800': 'Last day'};
 
+var COLUMNS =
+    {'exchanges' :
+     {'Overview': [['type',                 'Type',                   true],
+                   ['features',             'Features (with policy)', true],
+                   ['features_no_policy',   'Features (no policy)',   false],
+                   ['policy',               'Policy',                 false]],
+      'Message rates': [['rate-in',         'rate in',                true],
+                        ['rate-out',        'rate out',               true]]},
+     'queues' :
+     {'Overview': [['features',             'Features (with policy)', true],
+                   ['features_no_policy',   'Features (no policy)',   false],
+                   ['policy',               'Policy',                 false],
+                   ['consumers',            'Consumer count',         false],
+                   ['consumer_utilisation', 'Consumer utilisation',   false],
+                   ['state',                'State',                  true]],
+      'Messages': [['msgs-ready',      'Ready',          true],
+                   ['msgs-unacked',    'Unacknowledged', true],
+                   ['msgs-ram',        'In memory',      false],
+                   ['msgs-persistent', 'Persistent',     false],
+                   ['msgs-total',      'Total',          true]],
+      'Message bytes': [['msg-bytes-ready',      'Ready',          false],
+                        ['msg-bytes-unacked',    'Unacknowledged', false],
+                        ['msg-bytes-ram',        'In memory',      false],
+                        ['msg-bytes-persistent', 'Persistent',     false],
+                        ['msg-bytes-total',      'Total',          false]],
+      'Message rates': [['rate-incoming',  'incoming',      true],
+                        ['rate-deliver',   'deliver / get', true],
+                        ['rate-redeliver', 'redelivered',   false],
+                        ['rate-ack',       'ack',           true]]},
+     'channels' :
+     {'Overview': [['user',  'User name', true],
+                   ['mode',  'Mode',      true],
+                   ['state', 'State',     true]],
+      'Details': [['msgs-unconfirmed', 'Unconfirmed', true],
+                  ['prefetch',         'Prefetch',    true],
+                  ['msgs-unacked',     'Unacked',     true]],
+      'Transactions': [['msgs-uncommitted', 'Msgs uncommitted', false],
+                       ['acks-uncommitted', 'Acks uncommitted', false]],
+      'Message rates': [['rate-publish',   'publish',            true],
+                        ['rate-confirm',   'confirm',            true],
+                        ['rate-return',    'return (mandatory)', false],
+                        ['rate-deliver',   'deliver / get',      true],
+                        ['rate-redeliver', 'redelivered',        false],
+                        ['rate-ack',       'ack',                true]]},
+     'connections':
+     {'Overview': [['user',   'User name', true],
+                   ['state',  'State',     true]],
+      'Details': [['ssl',            'SSL / TLS',      true],
+                  ['ssl_info',       'SSL Details',    false],
+                  ['protocol',       'Protocol',       true],
+                  ['channels',       'Channels',       true],
+                  ['channel_max',    'Channel max',    false],
+                  ['frame_max',      'Frame max',      false],
+                  ['auth_mechanism', 'Auth mechanism', false],
+                  ['client',         'Client',         false]],
+      'Network': [['from_client',  'From client',  true],
+                  ['to_client',    'To client',    true],
+                  ['heartbeat',    'Heartbeat',    false],
+                  ['connected_at', 'Connected at', false]]},
+
+     'vhosts':
+     {'Messages': [['msgs-ready',      'Ready',          true],
+                   ['msgs-unacked',    'Unacknowledged', true],
+                   ['msgs-total',      'Total',          true]],
+      'Network': [['from_client',  'From client',  true],
+                  ['to_client',    'To client',    true]],
+      'Message rates': [['rate-publish', 'publish',       true],
+                        ['rate-deliver', 'deliver / get', true]]},
+     'overview':
+     {'Statistics': [['file_descriptors',   'File descriptors',   true],
+                     ['socket_descriptors', 'Socket descriptors', true],
+                     ['erlang_processes',   'Erlang processes',   true],
+                     ['memory',             'Memory',             true],
+                     ['disk_space',         'Disk space',         true]],
+      'General': [['uptime',     'Uptime',     false],
+                  ['rates_mode', 'Rates mode', false],
+                  ['info',       'Info',       true]]}};
+
 ///////////////////////////////////////////////////////////////////////////
 //                                                                       //
 // Mostly constant, typically get set once at startup (or rarely anyway) //
@@ -56,8 +136,9 @@ var CHART_PERIODS = {'60|5':       'Last minute',
 ///////////////////////////////////////////////////////////////////////////
 
 // All these are to do with hiding UI elements if
-var statistics_level;            // ...there are no fine stats
+var rates_mode;                  // ...there are no fine stats
 var user_administrator;          // ...user is not an admin
+var user_policymaker;            // ...user is not a policymaker
 var user_monitor;                // ...user cannot monitor
 var nodes_interesting;           // ...we are not in a cluster
 var vhosts_interesting;          // ...there is only one vhost
@@ -82,19 +163,20 @@ var user;
 // Set up the above vars
 function setup_global_vars() {
     var overview = JSON.parse(sync_get('/overview'));
-    statistics_level = overview.statistics_level;
+    rates_mode = overview.rates_mode;
     user_tags = expand_user_tags(user.tags.split(","));
     user_administrator = jQuery.inArray("administrator", user_tags) != -1;
+    user_policymaker = jQuery.inArray("policymaker", user_tags) != -1;
     user_monitor = jQuery.inArray("monitoring", user_tags) != -1;
     replace_content('login-details',
-                    '<p>User: <b>' + user.name + '</b></p>' +
-                    '<p>Cluster: <b>' + overview.cluster_name + '</b> ' +
+                    '<p>User: <b>' + fmt_escape_html(user.name) + '</b></p>' +
+                    '<p>Cluster: <b>' + fmt_escape_html(overview.cluster_name) + '</b> ' +
                     (user_administrator ?
                      '(<a href="#/cluster-name">change</a>)' : '') + '</p>' +
-                    '<p>RabbitMQ ' + overview.rabbitmq_version +
+                    '<p>RabbitMQ ' + fmt_escape_html(overview.rabbitmq_version) +
                     ', <acronym class="normal" title="' +
-                    overview.erlang_full_version + '">Erlang ' +
-                    overview.erlang_version + '</acronym></p>');
+                    fmt_escape_html(overview.erlang_full_version) + '">Erlang ' +
+                    fmt_escape_html(overview.erlang_version) + '</acronym></p>');
     nodes_interesting = false;
     rabbit_versions_interesting = false;
     if (user_monitor) {
@@ -145,6 +227,10 @@ var current_template;
 // Which JSON requests do we need to populate it
 var current_reqs;
 
+// And which of those have yet to return (so we can cancel them when
+// changing current_template).
+var outstanding_reqs = [];
+
 // Which tab is highlighted
 var current_highlight;
 
index b2e3b132c83f08b7c9d323f072b814935d779666..f50c19a38636a574c7c9704f492801e575a1676c 100644 (file)
@@ -17,6 +17,9 @@ HELP = {
     'queue-max-length':
       'How many (ready) messages a queue can contain before it starts to drop them from its head.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/maxlength.html">x-max-length</a>" argument.)',
 
+    'queue-max-length-bytes':
+      'Total body size for ready messages a queue can contain before it starts to drop them from its head.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/maxlength.html">x-max-length-bytes</a>" argument.)',
+
     'queue-auto-delete':
       'If yes, the queue will delete itself after at least one consumer has connected, and then all consumers have disconnected.',
 
@@ -26,11 +29,17 @@ HELP = {
     'queue-dead-letter-routing-key':
       'Optional replacement routing key to use when a message is dead-lettered. If this is not set, the message\'s original routing key will be used.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/dlx.html">x-dead-letter-routing-key</a>" argument.)',
 
-    'queue-memory-resident':
-      '<p>Number of messages in the queue which are held in memory. These messages may also be on disc (if they are persistent).</p><p>There may be a limit imposed in order to manage total memory use. If the number of memory-resident messages in the queue exceeds the limit some messages will be paged out.</p>',
+    'queue-max-priority':
+      'Maximum number of priority levels for the queue to support; if not set, the queue will not support message priorities.<br/>(Sets the "<a target="_blank" href="http://rabbitmq.com/priority.html">x-max-priority</a>" argument.)',
+
+    'queue-messages':
+      '<p>Message counts.</p><p>Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.</p>',
 
-    'queue-persistent':
-      'Number of messages in the queue which are persistent. These messages will be on disc but may also be available in memory. Note that if a message is published as persistent but routed to a transient queue it is not considered persistent by that queue, so transient queues will always report 0 persistent messages.',
+    'queue-message-body-bytes':
+      '<p>The sum total of the sizes of the message bodies in this queue. This only counts message bodies; it does not include message properties (including headers) or metadata used by the queue.</p><p>Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.</p><p>If a message is routed to multiple queues on publication, its body will be stored only once (in memory and on disk) and shared between queues. The value shown here does not take account of this effect.</p>',
+
+    'queue-process-memory':
+      'Total memory used by this queue process. This does not include in-memory message bodies (which may be shared between queues and will appear in the global "binaries" memory) but does include everything else.',
 
     'queue-consumer-utilisation':
       'Fraction of the time that the queue is able to immediately deliver messages to consumers. If this number is less than 100% you may be able to deliver messages faster if: \
@@ -188,9 +197,7 @@ HELP = {
         <dd>Number of messages for which the server is waiting for acknowledgement.</dd>\
         <dt>Total</dt>\
         <dd>The total of these two numbers.</dd>\
-      </dl>\
-    Note that the rate of change of total queued messages does \
-    <b>not</b> include messages removed due to queue deletion.',
+      </dl>',
 
     'message-rates':
     'Only rates for which some activity is taking place will be shown.\
@@ -213,7 +220,16 @@ HELP = {
         <dd>Rate at which messages with the \'redelivered\' flag set are being delivered. Note that these messages will <b>also</b> be counted in one of the delivery rates above.</dd>\
         <dt>Return</dt>\
         <dd>Rate at which basic.return is sent to publishers for unroutable messages published with the \'mandatory\' flag set.</dd>\
-      </dl>',
+        <dt>Disk read</dt>\
+        <dd>Rate at which queues read messages from disk.</dd>\
+        <dt>Disk write</dt>\
+        <dd>Rate at which queues write messages to disk.</dd>\
+      </dl>\
+      <p>\
+        Note that the last two items originate in queues rather than \
+        channels; they may therefore be slightly out of sync with other \
+        statistics.\
+      </p>',
 
     'disk-monitoring-no-watermark' : 'There is no <a target="_blank" href="http://www.rabbitmq.com/memory.html#diskfreesup">disk space low watermark</a> set. RabbitMQ will not take any action to avoid running out of disk space.',
 
@@ -221,51 +237,21 @@ HELP = {
 
     'memory-use' : '<p>Note that the memory details shown here are only updated on request - they could be too expensive to calculate every few seconds on a busy server.</p><p><a target="_blank" href="http://www.rabbitmq.com/memory-use.html">Read more</a> on memory use.</p>',
 
-    'policy-definitions' : '<dl>\
-<dt><code>ha-mode</code></dt>\
-  <dd>\
-    One of <code>all</code>, <code>exactly</code>\
-    or <code>nodes</code>.\
-  </dd>\
-  <dt><code>ha-params</code></dt>\
-  <dd>\
-    Absent if <code>ha-mode</code> is <code>all</code>, a number\
+    'binary-use' : '<p>Binary accounting is not exact; binaries are shared between processes (and thus the same binary might be counted in more than one section), and the VM does not allow us to track binaries that are not associated with processes (so some binary use might not appear at all).</p>',
+
+    'policy-ha-mode' : 'One of <code>all</code> (mirror to all nodes in the cluster), <code>exactly</code> (mirror to a set number of nodes) or <code>nodes</code> (mirror to an explicit list of nodes). If you choose one of the latter two, you must also set <code>ha-params</code>.',
+
+    'policy-ha-params' : 'Absent if <code>ha-mode</code> is <code>all</code>, a number\
     if <code>ha-mode</code> is <code>exactly</code>, or a list\
-    of strings if <code>ha-mode</code> is <code>nodes</code>.\
-  </dd>\
-  <dt><code>ha-sync-mode</code></dt>\
-  <dd>\
-    One of <code>manual</code> or <code>automatic</code>.\
-  </dd>\
-  <dt><code>alternate-exchange</code></dt>\
-  <dd>\
-    The name of an alternate exchange.\
-  </dd>\
-  <dt><code>dead-letter-exchange</code></dt>\
-  <dd>\
-    The name of a dead letter exchange.\
-  </dd>\
-  <dt><code>dead-letter-routing-key</code></dt>\
-  <dd>\
-    Key to use when dead-lettering.\
-  </dd>\
-  <dt><code>message-ttl</code></dt>\
-  <dd>\
-    Per-queue message TTL, in milliseconds.\
-  </dd>\
-  <dt><code>expires</code></dt>\
-  <dd>\
-    Queue TTL, in milliseconds.\
-  </dd>\
-  <dt><code>max-length</code></dt>\
-  <dd>\
-    Maximum queue length, in messages.\
-  </dd>\
-  <dt><code>federation-upstream-set</code></dt>\
-  <dd>\
-    A string; only if the federation plugin is enabled.\
-  </dd>\
-</dl>',
+    of strings if <code>ha-mode</code> is <code>nodes</code>.',
+
+    'policy-ha-sync-mode' : 'One of <code>manual</code> or <code>automatic</code>.',
+
+    'policy-federation-upstream-set' :
+    'A string; only if the federation plugin is enabled. Chooses the name of a set of upstreams to use with federation, or "all" to use all upstreams. Incompatible with <code>federation-upstream</code>.',
+
+    'policy-federation-upstream' :
+    'A string; only if the federation plugin is enabled. Chooses a specific upstream to use for federation. Incompatible with <code>federation-upstream-set</code>.',
 
     'handle-exe' : 'In order to monitor the number of file descriptors in use on Windows, RabbitMQ needs the <a href="http://technet.microsoft.com/en-us/sysinternals/bb896655" target="_blank">handle.exe command line tool from Microsoft</a>. Download it and place it in the path (e.g. in C:\Windows).',
 
@@ -274,6 +260,68 @@ HELP = {
     and regular expressions are matched in a case-insensitive manner.<br/></br/> \
     (<a href="https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions" target="_blank">Regular expression reference</a>)',
 
+    'plugins' :
+    'Note that only plugins which are both explicitly enabled and running are shown here.',
+
+    'io-operations':
+    'Rate of I/O operations. Only operations performed by the message \
+      persister are shown here (e.g. metadata changes in Mnesia or writes \
+      to the log files are not shown).\
+      <dl>\
+        <dt>Read</dt>\
+        <dd>Rate at which data is read from the disk.</dd>\
+        <dt>Write</dt>\
+        <dd>Rate at which data is written to the disk.</dd>\
+        <dt>Seek</dt>\
+        <dd>Rate at which the broker switches position while reading or \
+         writing to disk.</dd>\
+        <dt>Sync</dt>\
+        <dd>Rate at which the broker invokes <code>fsync()</code> to ensure \
+         data is flushed to disk.</dd>\
+        <dt>Reopen</dt>\
+        <dd>Rate at which the broker recycles file handles in order to support \
+         more queues than it has file handles. If this operation is occurring \
+         frequently you may get a performance boost from increasing the number \
+         of file handles available.</dd>\
+      </dl>',
+
+    'mnesia-transactions':
+    'Rate at which Mnesia transactions are initiated on this node (this node \
+     will also take part in Mnesia transactions initiated on other nodes).\
+      <dl>\
+        <dt>RAM only</dt>\
+        <dd>Rate at which RAM-only transactions take place (e.g. creation / \
+            deletion of transient queues).</dd>\
+        <dt>Disk</dt>\
+        <dd>Rate at which disk (and RAM) transactions take place (e.g. \
+            creation / deletion of durable queues).</dd>\
+      </dl>',
+
+    'persister-operations-msg':
+    'Rate at which per-message persister operations take place on this node. See \
+     <a href="http://www.rabbitmq.com/persistence-conf.html" target="_blank">here</a> \
+     for more information on the persister. \
+      <dl>\
+        <dt>QI Journal</dt>\
+        <dd>Rate at which message information (publishes, deliveries and \
+            acknowledgements) is written to queue index journals.</dd>\
+        <dt>Store Read</dt>\
+        <dd>Rate at which messages are read from the message store.</dd>\
+        <dt>Store Write</dt>\
+        <dd>Rate at which messages are written to the message store.</dd>\
+      </dl>',
+
+    'persister-operations-bulk':
+    'Rate at which whole-file persister operations take place on this node. See \
+     <a href="http://www.rabbitmq.com/persistence-conf.html" target="_blank">here</a> \
+     for more information on the persister. \
+      <dl>\
+        <dt>QI Read</dt>\
+        <dd>Rate at which queue index segment files are read.</dd>\
+        <dt>QI Write</dt>\
+        <dd>Rate at which queue index segment files are written. </dd>\
+      </dl>',
+
     'foo': 'foo' // No comma.
 };
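To make the per-key policy help above concrete, here is a hedged sketch (not part of the patch) of a policy definition combining those keys; the pattern, priority and node count are invented for illustration.

// Illustrative only: mirror matching queues to exactly two nodes and
// synchronise new mirrors automatically. ha-params is required here
// because ha-mode is not "all".
var example_policy = {
    pattern:    '^ha\\.',
    definition: {'ha-mode':      'exactly',
                 'ha-params':    2,
                 'ha-sync-mode': 'automatic'},
    priority:   0,
    'apply-to': 'queues'
};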
 
index 3091d9071e3e4875f088de5ed1dec0e5386f5eec..8118f6215fe6c8451340a8fc11ae524cba8a000c 100644 (file)
@@ -16,16 +16,16 @@ function dispatcher() {
     }
 }
 
-function set_auth_cookie(userinfo) {
+function set_auth_pref(userinfo) {
     var b64 = b64_encode_utf8(userinfo);
-    document.cookie = 'auth=' + encodeURIComponent(b64);
+    store_pref('auth', encodeURIComponent(b64));
 }
 
 function login_route () {
     var userpass = '' + this.params['username'] + ':' + this.params['password'],
         location = window.location.href,
         hash = window.location.hash;
-    set_auth_cookie(decodeURIComponent(userpass));
+    set_auth_pref(decodeURIComponent(userpass));
     location = location.substr(0, location.length - hash.length);
     window.location.replace(location);
     // because we change url, we don't need to hit check_login as
@@ -38,13 +38,13 @@ function start_app_login() {
         this.put('#/login', function() {
             username = this.params['username'];
             password = this.params['password'];
-            set_auth_cookie(username + ':' + password);
+            set_auth_pref(username + ':' + password);
             check_login();
         });
         this.get('#/login/:username/:password', login_route)
     });
     app.run();
-    if (get_cookie('auth') != '') {
+    if (get_pref('auth') != null) {
         check_login();
     }
 }
@@ -52,7 +52,7 @@ function start_app_login() {
 function check_login() {
     user = JSON.parse(sync_get('/whoami'));
     if (user == false) {
-        document.cookie = 'auth=; expires=Thu, 01 Jan 1970 00:00:00 GMT';
+        clear_pref('auth');
         replace_content('login-status', '<p>Login failed</p>');
     }
     else {
@@ -189,9 +189,9 @@ function reset_timer() {
 function update_manual(div, query) {
     var path;
     var template;
-    if (query == 'memory') {
-        path = current_reqs['node'] + '?memory=true';
-        template = 'memory';
+    if (query == 'memory' || query == 'binary') {
+        path = current_reqs['node']['path'] + '?' + query + '=true';
+        template = query;
     }
     var data = JSON.parse(sync_get(path));
 
@@ -202,6 +202,10 @@ function update_manual(div, query) {
 function render(reqs, template, highlight) {
     current_template = template;
     current_reqs = reqs;
+    for (var i in outstanding_reqs) {
+        outstanding_reqs[i].abort();
+    }
+    outstanding_reqs = [];
     current_highlight = highlight;
     update();
 }
@@ -368,7 +372,6 @@ function y_position() {
 
 function with_update(fun) {
     with_reqs(apply_state(current_reqs), [], function(json) {
-            json.statistics_level = statistics_level;
             var html = format(current_template, json);
             fun(html);
             update_status('ok');
@@ -400,7 +403,7 @@ function apply_state(reqs) {
         if (options['ranges'] != undefined) {
             for (i in options['ranges']) {
                 var type = options['ranges'][i];
-                var range = get_pref('chart-range-' + type).split('|');
+                var range = get_pref('chart-range').split('|');
                 var prefix;
                 if (type.substring(0, 8) == 'lengths-') {
                     prefix = 'lengths';
@@ -411,6 +414,9 @@ function apply_state(reqs) {
                 else if (type.substring(0, 11) == 'data-rates-') {
                     prefix = 'data_rates';
                 }
+                else if (type == 'node-stats') {
+                    prefix = 'node_stats';
+                }
                 qs.push(prefix + '_age=' + parseInt(range[0]));
                 qs.push(prefix + '_incr=' + parseInt(range[1]));
             }
@@ -457,7 +463,7 @@ function postprocess() {
             return confirm("Are you sure? This object cannot be recovered " +
                            "after deletion.");
         });
-    $('div.section h2, div.section-hidden h2').click(function() {
+    $('div.section h2, div.section-hidden h2').die().live('click', function() {
             toggle_visibility($(this));
         });
     $('label').map(function() {
@@ -473,7 +479,7 @@ function postprocess() {
     $('#download-definitions').click(function() {
             var path = 'api/definitions?download=' +
                 esc($('#download-filename').val()) +
-                '&auth=' + get_cookie('auth');
+                '&auth=' + get_pref('auth');
             window.location = path;
             setTimeout('app.run()');
             return false;
@@ -501,31 +507,46 @@ function postprocess() {
             }
         }
     });
-    setup_visibility();
     $('.help').die().live('click', function() {
         help($(this).attr('id'))
     });
-    $('.rate-options').die().live('click', function() {
+    $('.popup-options-link').die().live('click', function() {
         var remove = $('.popup-owner').length == 1 &&
                      $('.popup-owner').get(0) == $(this).get(0);
         $('.popup-owner').removeClass('popup-owner');
         if (remove) {
-            $('.form-popup-rate-options').fadeOut(200, function() {
+            $('.form-popup-options').fadeOut(200, function() {
                 $(this).remove();
             });
         }
         else {
             $(this).addClass('popup-owner');
-            show_popup('rate-options', format('rate-options', {span: $(this)}),
+            var template = $(this).attr('type') + '-options';
+            show_popup('options', format(template, {span: $(this)}),
                        'fade');
         }
     });
+    $('.rate-visibility-option').die().live('click', function() {
+        var k = $(this).attr('data-pref');
+        var show = get_pref(k) !== 'true';
+        store_pref(k, '' + show);
+        partial_update();
+    });
     $('input, select').live('focus', function() {
         update_counter = 0; // If there's interaction, reset the counter.
     });
     $('.tag-link').click(function() {
         $('#tags').val($(this).attr('tag'));
     });
+    $('.argument-link').click(function() {
+        var field = $(this).attr('field');
+        var row = $('#' + field).find('.mf').last();
+        var key = row.find('input').first();
+        var type = row.find('select').last();
+        key.val($(this).attr('key'));
+        type.val($(this).attr('type'));
+        update_multifields();
+    });
     $('form.auto-submit select, form.auto-submit input').live('click', function(){
         $(this).parents('form').submit();
     });
@@ -539,6 +560,7 @@ function postprocess() {
 }
 
 function postprocess_partial() {
+    setup_visibility();
     $('.sort').click(function() {
             var sort = $(this).attr('sort');
             if (current_sort == sort) {
@@ -570,7 +592,8 @@ function update_multifield(multifield, dict) {
     var largest_id = 0;
     var empty_found = false;
     var name = multifield.attr('id');
-    $('#' + name + ' *[name$="_mftype"]').each(function(index) {
+    var type_inputs = $('#' + name + ' *[name$="_mftype"]');
+    type_inputs.each(function(index) {
         var re = new RegExp(name + '_([0-9]*)_mftype');
         var match = $(this).attr('name').match(re);
         if (!match) return;
@@ -591,10 +614,12 @@ function update_multifield(multifield, dict) {
                 var key = dict ? $('#' + prefix + '_mfkey').val() : '';
                 var value = input.val();
                 if (key == '' && value == '') {
-                    if (empty_found) {
-                        $(this).parent().remove();
+                    if (index == type_inputs.length - 1) {
+                        empty_found = true;
+                    }
+                    else {
+                        $(this).parents('.mf').first().remove();
                     }
-                    empty_found = true;
                 }
             }
             else {
@@ -610,13 +635,13 @@ function update_multifield(multifield, dict) {
             multifield_input(prefix, 'type', t);
 
         if (dict) {
-            multifield.append('<table><tr><td>' +
+            multifield.append('<table class="mf"><tr><td>' +
                               multifield_input(prefix, 'key', 'text') +
                               '</td><td class="equals"> = </td><td>' +
                               val_type + '</td></tr></table>');
         }
         else {
-            multifield.append('<div>' + val_type + '</div>');
+            multifield.append('<div class="mf">' + val_type + '</div>');
         }
     }
 }
@@ -696,6 +721,11 @@ function setup_visibility() {
         }
         if (show) {
             $(this).addClass('section-visible');
+            // Workaround for... something. Although div.hider is
+            // display:block anyway, not explicitly setting this
+            // prevents the first slideToggle() from animating
+            // successfully; instead the element just vanishes.
+            $(this).find('.hider').attr('style', 'display:block;');
         }
         else {
             $(this).addClass('section-invisible');
@@ -835,7 +865,7 @@ function update_status(status) {
 }
 
 function auth_header() {
-    return "Basic " + decodeURIComponent(get_cookie('auth'));
+    return "Basic " + decodeURIComponent(get_pref('auth'));
 }
 
 function with_req(method, path, body, fun) {
@@ -845,12 +875,17 @@ function with_req(method, path, body, fun) {
     req.setRequestHeader('authorization', auth_header());
     req.onreadystatechange = function () {
         if (req.readyState == 4) {
+            var ix = jQuery.inArray(req, outstanding_reqs);
+            if (ix != -1) {
+                outstanding_reqs.splice(ix, 1);
+            }
             if (check_bad_response(req, true)) {
                 last_successful_connect = new Date();
                 fun(req);
             }
         }
     };
+    outstanding_reqs.push(req);
     req.send(body);
 }
 
@@ -966,10 +1001,7 @@ function fill_path_template(template, params) {
 }
 
 function params_magic(params) {
-    return check_password(
-             add_known_arguments(
-               maybe_remove_fields(
-                 collapse_multifields(params))));
+    return check_password(maybe_remove_fields(collapse_multifields(params)));
 }
 
 function collapse_multifields(params0) {
@@ -1043,25 +1075,6 @@ function collapse_multifields(params0) {
     return params;
 }
 
-function add_known_arguments(params) {
-    for (var k in KNOWN_ARGS) {
-        var v = params[k];
-        if (v != undefined && v != '') {
-            var type = KNOWN_ARGS[k].type;
-            if (type == 'int') {
-                v = parseInt(v);
-                if (isNaN(v)) {
-                    throw(k + " must be an integer.");
-                }
-            }
-            params.arguments[k] = v;
-        }
-        delete params[k];
-    }
-
-    return params;
-}
-
 function check_password(params) {
     if (params['password'] != undefined) {
         if (params['password'] == '') {
@@ -1139,6 +1152,20 @@ function put_policy(sammy, mandatory_keys, num_keys, bool_keys) {
     if (sync_put(sammy, '/policies/:vhost/:name')) update();
 }
 
+function update_column_options(sammy) {
+    var mode = sammy.params['mode'];
+    for (var group in COLUMNS[mode]) {
+        var options = COLUMNS[mode][group];
+        for (var i = 0; i < options.length; i++) {
+            var key = options[i][0];
+            var value = sammy.params[mode + '-' + key] != undefined;
+            store_pref('column-' + mode + '-' + key, value);
+        }
+    }
+
+    partial_update();
+}
+
 function debug(str) {
     $('<p>' + str + '</p>').appendTo('#debug');
 }
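A hedged sketch (not part of the patch) of the preference keys that update_column_options writes and the templates read back; the 'channels' mode and 'rate-publish' column are illustrative examples taken from COLUMNS.

// Illustrative only: ticking "publish" in the channels column chooser stores
store_pref('column-channels-rate-publish', true);
// ...which the list templates later read back via
var visible = show_column('channels', 'rate-publish'); // true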
index c0f6edcb7c541710435421e8b250080eb7ebfec2..d0cccb266f880adf429ec25427e7ff10c5cd04f7 100644 (file)
@@ -1,19 +1,45 @@
-// TODO It would be nice to use DOM storage. When that's available.
+// TODO strip out all this cookie nonsense when we drop support for MSIE 7.
+
+function local_storage_available() {
+    try {
+        return 'localStorage' in window && window['localStorage'] !== null;
+    } catch (e) {
+        return false;
+    }
+}
 
 function store_pref(k, v) {
-    var d = parse_cookie();
-    d[short_key(k)] = v;
-    store_cookie(d);
+    if (local_storage_available()) {
+        window.localStorage['rabbitmq.' + k] = v;
+    }
+    else {
+        var d = parse_cookie();
+        d[short_key(k)] = v;
+        store_cookie(d);
+    }
 }
 
 function clear_pref(k) {
-    var d = parse_cookie();
-    delete d[short_key(k)];
-    store_cookie(d);
+    if (local_storage_available()) {
+        window.localStorage.removeItem('rabbitmq.' + k);
+    }
+    else {
+        var d = parse_cookie();
+        delete d[short_key(k)];
+        store_cookie(d);
+    }
+
 }
 
 function get_pref(k) {
-    var r = parse_cookie()[short_key(k)];
+    var r;
+    if (local_storage_available()) {
+        r = window.localStorage['rabbitmq.' + k];
+    }
+    else {
+        r = parse_cookie()[short_key(k)];
+
+    }
     return r == undefined ? default_pref(k) : r;
 }
 
@@ -21,16 +47,38 @@ function section_pref(template, name) {
     return 'visible|' + template + '|' + name;
 }
 
+function show_column(mode, column) {
+    return get_pref('column-' + mode + '-' + column) == 'true';
+}
+
 // ---------------------------------------------------------------------------
 
 function default_pref(k) {
-    if (k.substring(0, 12) == 'chart-range-') return '60|5';
     if (k.substring(0, 11) == 'chart-size-')  return 'small';
     if (k.substring(0, 10) == 'rate-mode-')   return 'chart';
+    if (k.substring(0, 11) == 'chart-line-')  return 'true';
     if (k == 'truncate')                      return '100';
+    if (k == 'chart-range')                   return '60|5';
+    if (k.substring(0,  7) == 'column-')
+        return default_column_pref(k.substring(7));
     return null;
 }
 
+function default_column_pref(key0) {
+    var ix = key0.indexOf('-');
+    var mode = key0.substring(0, ix);
+    var key = key0.substring(ix + 1);
+    for (var group in COLUMNS[mode]) {
+        var options = COLUMNS[mode][group];
+        for (var i = 0; i < options.length; i++) {
+            if (options[i][0] == key) {
+                return '' + options[i][2];
+            }
+        }
+    }
+    return 'false';
+}
+
 // ---------------------------------------------------------------------------
 
 function parse_cookie() {
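A hedged usage sketch (not part of the patch) of the new preference layer: values go to window.localStorage under a 'rabbitmq.' prefix when available, otherwise to the existing cookie; the key and value below are taken from chart-range handling for illustration.

// Illustrative only: round-tripping a preference through the new storage layer.
store_pref('chart-range', '28800|600');  // stored as localStorage key "rabbitmq.chart-range"
var range = get_pref('chart-range');     // "28800|600"; '60|5' would be the default if unset
clear_pref('chart-range');               // removes it from localStorage (or the cookie)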
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/binary.ejs b/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/binary.ejs
new file mode 100644 (file)
index 0000000..c3e13c7
--- /dev/null
@@ -0,0 +1,53 @@
+<%
+  if (binary == "not_available") {
+%>
+<p class="warning">
+  Binary statistics not available.
+</p>
+<% } else { %>
+<%
+  var sections = {'queue_procs'         : ['queue',  'Queues'],
+                  'queue_slave_procs'   : ['queue',  'Queues (slaves)'],
+                  'connection_readers'  : ['conn',   'Connection readers'],
+                  'connection_writers'  : ['conn',   'Connection writers'],
+                  'connection_channels' : ['conn',   'Connection channels'],
+                  'connection_other'    : ['conn',   'Connections (other)'],
+                  'msg_index'           : ['table',  'Message store index'],
+                  'mgmt_db'             : ['table',  'Management database'],
+                  'plugins'             : ['proc',   'Plugins'],
+                  'other'               : ['system', 'Other binary references']};
+   var total_out = [];
+%>
+<%= format('memory-bar', {sections: sections, memory: binary, total_out: total_out}) %>
+<span class="clear">&nbsp;</span>
+<div class="box">
+<%
+var key = [[{name: 'Queues', colour: 'queue',
+             keys: [['queue_procs',         'queues'],
+                    ['queue_slave_procs',   'slaves']]}],
+
+           [{name: 'Connections', colour: 'conn',
+             keys: [['connection_readers',  'readers'],
+                    ['connection_writers',  'writers'],
+                    ['connection_channels', 'channels'],
+                    ['connection_other',    'other']]}],
+
+           [{name: 'Tables', colour: 'table',
+             keys: [['msg_index',           'message store index'],
+                    ['mgmt_db',             'management database']]}],
+
+           [{name: 'Processes', colour: 'proc',
+             keys: [['plugins',             'plugins']]},
+            {name: 'System', colour: 'system',
+             keys: [['other',               'other']]}]];
+%>
+<%= format('memory-table', {key: key, memory: binary}) %>
+</div>
+
+<div class="memory-info">
+  Last updated: <b><%= fmt_date(new Date()) %></b>.<br/>
+  Total referenced binaries at last update: <b><%= fmt_bytes(total_out[0]) %></b>
+  <span class="help" id="binary-use"></span>
+</div>
+
+<% } %>
index e7843d1461d54c76934c525d786b0bb56693cc62..1e9d18e7f98a9878669d67f939b34e7df45ffe9a 100644 (file)
@@ -1,15 +1,14 @@
-<h1>Channel: <b><%= fmt_escape_html(channel.name) %></b></h1>
+<h1>Channel: <b><%= fmt_escape_html(channel.name) %></b><%= fmt_maybe_vhost(channel.vhost) %></h1>
 
 <div class="section">
 <h2>Overview</h2>
-<div class="hider">
-<% if (statistics_level == 'fine') { %>
+<div class="hider updatable">
+<% if (rates_mode != 'none') { %>
     <%= message_rates('msg-rates-ch', channel.message_stats) %>
 <% } %>
 
-<div class="updatable">
 <h3>Details</h3>
-<table class="facts">
+<table class="facts facts-l">
   <tr>
     <th>Connection</th>
     <td><%= link_conn(channel.connection_details.name) %></td>
     <th>Node</th>
     <td><%= fmt_node(channel.node) %></td>
   </tr>
-<% } %>
-<% if (vhosts_interesting) { %>
-  <tr>
-    <th>Virtual host</th>
-    <td><%= fmt_string(channel.vhost) %></td>
-  </tr>
 <% } %>
   <tr>
     <th>Username</th>
@@ -36,7 +29,7 @@
   </tr>
 </table>
 
-<table class="facts">
+<table class="facts facts-l">
   <tr>
     <th>State</th>
     <td><%= fmt_object_state(channel) %></td>
@@ -69,7 +62,6 @@
     <td><%= channel.acks_uncommitted %></td>
   </tr>
 </table>
-</div>
 
 </div>
 </div>
@@ -81,7 +73,7 @@
   </div>
 </div>
 
-<% if (statistics_level == 'fine') { %>
+<% if (rates_mode == 'detailed') { %>
 <div class="section">
 <h2>Message rates breakdown</h2>
 <div class="hider updatable">
index fa944a8a6b281fcff20765c1bc6d3b546a5e5af7..591a7c04b2b94f8edbb624f66a9796012b818036 100644 (file)
@@ -1,22 +1,18 @@
 <% if (channels.length > 0) { %>
-<%
-     var col_return_unroutable = !is_col_empty(channels, 'return_unroutable');
-     var col_redeliver = !is_col_empty(channels, 'redeliver');
-     var ratesWidth = col_return_unroutable ? 5 : 4;
-%>
 <table class="list">
  <thead>
   <tr>
 <% if (mode == 'standalone') { %>
-   <th colspan="<% if (nodes_interesting) { %>2<% } else { %>1<% } %>"></th>
-   <th colspan="<% if (vhosts_interesting) { %>7<% } else { %>6<% } %>">Details</th>
+    <%= group_heading('channels', 'Overview', [true, vhosts_interesting, nodes_interesting]) %>
 <% } else { %>
-   <th></th>
-   <th colspan="5">Details</th>
+    <%= group_heading('channels', 'Overview', [true]) %>
 <% } %>
-<% if (statistics_level == 'fine') { %>
-   <th colspan="<%= ratesWidth %>">Message rates</th>
+    <%= group_heading('channels', 'Details', []) %>
+    <%= group_heading('channels', 'Transactions', []) %>
+<% if (rates_mode != 'none') { %>
+    <%= group_heading('channels', 'Message rates', []) %>
 <% } %>
+    <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="channels">+/-</span></th>
   </tr>
   <tr>
 <% if (mode == 'standalone') { %>
 <% if (vhosts_interesting) { %>
     <th><%= fmt_sort('Virtual host',    'vhost') %></th>
 <% } %>
+<% if (show_column('channels', 'user')) { %>
     <th><%= fmt_sort('User name',       'user') %></th>
+<% } %>
+<% if (show_column('channels', 'mode')) { %>
     <th>Mode <span class="help" id="channel-mode"></span></th>
+<% } %>
+<% if (show_column('channels', 'state')) { %>
+    <th><%= fmt_sort('State',           'state') %></th>
+<% } %>
+<% if (show_column('channels', 'msgs-unconfirmed')) { %>
+    <th><%= fmt_sort('Unconfirmed',     'messages_unconfirmed') %></th>
+<% } %>
+<% if (show_column('channels', 'prefetch')) { %>
     <th>Prefetch <span class="help" id="channel-prefetch"></span></th>
+<% } %>
+<% if (show_column('channels', 'msgs-unacked')) { %>
     <th><%= fmt_sort('Unacked',         'messages_unacknowledged') %></th>
-    <th><%= fmt_sort('Unconfirmed',     'messages_unconfirmed') %></th>
-    <th><%= fmt_sort('State',           'state') %></th>
-<% if (statistics_level == 'fine') { %>
+<% } %>
+<% if (show_column('channels', 'msgs-uncommitted')) { %>
+    <th><%= fmt_sort('Uncommitted msgs', 'messages_uncommitted') %></th>
+<% } %>
+<% if (show_column('channels', 'acks-uncommitted')) { %>
+    <th><%= fmt_sort('Uncommitted acks', 'acks_uncommitted') %></th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('channels', 'rate-publish')) { %>
     <th><%= fmt_sort('publish', 'message_stats.publish_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-confirm')) { %>
     <th><%= fmt_sort('confirm', 'message_stats.confirm_details.rate') %></th>
-    <th>
-      <%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %>
-      <% if (col_redeliver) { %>
-        <sub><%= fmt_sort('of which redelivered', 'message_stats.redeliver_details.rate') %></sub>
-      <% } %>
-    </th>
+<% } %>
+<% if (show_column('channels', 'rate-return')) { %>
+    <th><%= fmt_sort('return (mandatory)', 'message_stats.return_unroutable_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-deliver')) { %>
+    <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-redeliver')) { %>
+    <th><%= fmt_sort('redelivered', 'message_stats.redeliver_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-ack')) { %>
     <th><%= fmt_sort('ack', 'message_stats.ack_details.rate') %></th>
-    <% if (col_return_unroutable) { %>
-        <th><%= fmt_sort('return (mandatory)', 'message_stats.return_unroutable_details.rate') %></th>
-    <% } %>
+<% } %>
 <% } %>
 <% } else { %>
 <!-- TODO make sortable after bug 23401 -->
     <th>Channel</th>
+<% if (show_column('channels', 'user')) { %>
+    <th>User name</th>
+<% } %>
+<% if (show_column('channels', 'mode')) { %>
     <th>Mode <span class="help" id="channel-mode"></span></th>
+<% } %>
+<% if (show_column('channels', 'state')) { %>
+    <th>State</th>
+<% } %>
+<% if (show_column('channels', 'msgs-unconfirmed')) { %>
+    <th>Unconfirmed</th>
+<% } %>
+<% if (show_column('channels', 'prefetch')) { %>
     <th>Prefetch <span class="help" id="channel-prefetch"></span></th>
+<% } %>
+<% if (show_column('channels', 'msgs-unacked')) { %>
     <th>Unacked</th>
-    <th>Unconfirmed</th>
-    <th>State</th>
-<% if (statistics_level == 'fine') { %>
+<% } %>
+<% if (show_column('channels', 'msgs-uncommitted')) { %>
+    <th>Uncommitted msgs</th>
+<% } %>
+<% if (show_column('channels', 'acks-uncommitted')) { %>
+    <th>Uncommitted acks</th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('channels', 'rate-publish')) { %>
     <th>publish</th>
+<% } %>
+<% if (show_column('channels', 'rate-confirm')) { %>
     <th>confirm</th>
-    <th>
-      deliver / get
-      <% if (col_redeliver) { %>
-        <sub>of which redelivered</sub>
-        <% } %>
-    </th>
+<% } %>
+<% if (show_column('channels', 'rate-return')) { %>
+    <th>return (mandatory)</th>
+<% } %>
+<% if (show_column('channels', 'rate-deliver')) { %>
+    <th>deliver / get</th>
+<% } %>
+<% if (show_column('channels', 'rate-redeliver')) { %>
+    <th>redelivered</th>
+<% } %>
+<% if (show_column('channels', 'rate-ack')) { %>
     <th>ack</th>
-    <% if (col_return_unroutable) { %>
-        <th>return (mandatory)</th>
-    <% } %>
+<% } %>
 <% } %>
 <% } %>
   </tr>
 <% if (mode == 'standalone' && nodes_interesting) { %>
     <td><%= fmt_node(channel.node) %></td>
 <% } %>
-<% if (mode == 'standalone') { %>
-<% if (vhosts_interesting) { %>
+<% if (mode == 'standalone' && vhosts_interesting) { %>
     <td class="c"><%= fmt_string(channel.vhost) %></td>
 <% } %>
+<% if (show_column('channels', 'user')) { %>
     <td class="c"><%= fmt_string(channel.user) %></td>
 <% } %>
-    <td class="l">
+<% if (show_column('channels', 'mode')) { %>
+    <td class="c">
       <%= fmt_channel_mode(channel) %>
-      <% if (channel.transactional) { %>
-      <small><acronym title="<%= channel.messages_uncommitted %> uncommitted messages"><%= channel.messages_uncommitted %>m</acronym>/<acronym title="<%= channel.acks_uncommitted %> uncommitted acks"><%= channel.acks_uncommitted %>a</acronym></small>
-      <% } %>
     </td>
+<% } %>
+<% if (show_column('channels', 'state')) { %>
+    <td class="c"><%= fmt_object_state(channel) %></td>
+<% } %>
+<% if (show_column('channels', 'msgs-unconfirmed')) { %>
+    <td class="c"><%= channel.messages_unconfirmed %></td>
+<% } %>
+<% if (show_column('channels', 'prefetch')) { %>
     <td class="c">
       <% if (channel.prefetch_count != 0) { %>
         <%= channel.prefetch_count %><br/>
         <%= channel.global_prefetch_count %> (global)
       <% } %>
     </td>
+<% } %>
+<% if (show_column('channels', 'msgs-unacked')) { %>
     <td class="c"><%= channel.messages_unacknowledged %></td>
-    <td class="c"><%= channel.messages_unconfirmed %></td>
-    <td class="c"><%= fmt_object_state(channel) %></td>
-<% if (statistics_level == 'fine') { %>
-    <td class="r"><%= fmt_rate(channel.message_stats, 'publish') %></td>
-    <td class="r"><%= fmt_rate(channel.message_stats, 'confirm') %></td>
-    <td class="r"><%= fmt_deliver_rate(channel.message_stats, col_redeliver) %></td>
-    <td class="r"><%= fmt_rate(channel.message_stats, 'ack') %></td>
-    <% if (col_return_unroutable) { %>
-    <td class="r"><%= fmt_rate(channel.message_stats, 'return_unroutable') %></td>
-    <% } %>
+<% } %>
+<% if (show_column('channels', 'msgs-uncommitted')) { %>
+    <td class="c"><%= channel.messages_uncommitted %></td>
+<% } %>
+<% if (show_column('channels', 'acks-uncommitted')) { %>
+    <td class="c"><%= channel.acks_uncommitted %></td>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('channels', 'rate-publish')) { %>
+    <td class="r"><%= fmt_detail_rate(channel.message_stats, 'publish') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-confirm')) { %>
+    <td class="r"><%= fmt_detail_rate(channel.message_stats, 'confirm') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-return')) { %>
+    <td class="r"><%= fmt_detail_rate(channel.message_stats, 'return_unroutable') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-deliver')) { %>
+    <td class="r"><%= fmt_detail_rate(channel.message_stats, 'deliver_get') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-redeliver')) { %>
+    <td class="r"><%= fmt_detail_rate(channel.message_stats, 'redeliver') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-ack')) { %>
+    <td class="r"><%= fmt_detail_rate(channel.message_stats, 'ack') %></td>
+<% } %>
 <% } %>
   </tr>
   <% } %>
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/columns-options.ejs b/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/columns-options.ejs
new file mode 100644 (file)
index 0000000..2059460
--- /dev/null
@@ -0,0 +1,25 @@
+<%
+   var mode = span.attr('for');
+%>
+
+<form action="#/column-options" method="put" class="auto-submit">
+  <input type="hidden" name="mode" value="<%= mode %>"/>
+  <table class="form" width="100%">
+    <tr>
+      <td colspan="2">
+        <h3>Columns for this table</h3>
+      </td>
+    </tr>
+<% for (var group in COLUMNS[mode]) {
+   var options = COLUMNS[mode][group];  %>
+    <tr>
+      <th><label><%= group %>:</label></th>
+      <td>
+      <% for (var i = 0; i < options.length; i++) { %>
+          <%= fmt_checkbox(mode + '-' + options[i][0], options[i][1], get_pref('column-' + mode + '-' + options[i][0]) == 'true') %>
+      <% } %>
+      </td>
+<% } %>
+    </tr>
+  </table>
+</form>
index 0e01e818b371046580cb8d71a0e67789cab6ce87..ae1ce9b08011d7a822706f650bb8f37341fb2651 100644 (file)
@@ -1,25 +1,18 @@
-<h1>Connection <b><%= fmt_string(connection.name) %></b></h1>
+<h1>Connection <b><%= fmt_string(connection.name) %></b><%= fmt_maybe_vhost(connection.vhost) %></h1>
 
 <div class="section">
 <h2>Overview</h2>
-<div class="hider">
+<div class="hider updatable">
   <%= data_rates('data-rates-conn', connection, 'Data rates') %>
 
-<div class="updatable">
 <h3>Details</h3>
-<table class="facts">
+<table class="facts facts-l">
 <% if (nodes_interesting) { %>
 <tr>
   <th>Node</th>
   <td><%= fmt_node(connection.node) %></td>
 </tr>
 <% } %>
-<% if (vhosts_interesting) { %>
-<tr>
- <th>Virtual host</th>
- <td><%= fmt_string(connection.vhost) %></td>
-</tr>
-<% } %>
 <tr>
  <th>Username</th>
  <td><%= fmt_string(connection.user) %></td>
  <th>Protocol</th>
  <td><%= connection.protocol %></td>
 </tr>
+<tr>
+  <th>Connected at</th>
+  <td><%= fmt_timestamp(connection.connected_at) %></td>
+</tr>
 
 <% if (connection.ssl) { %>
 <tr>
@@ -51,7 +48,7 @@
  <td><%= fmt_object_state(connection) %></td>
 </tr>
 <tr>
- <th>Timeout</th>
+ <th>Heartbeat</th>
  <td><%= fmt_time(connection.timeout, 's') %></td>
 </tr>
 <tr>
@@ -64,7 +61,6 @@
 </tr>
 </table>
 <% } %>
-</div>
 
 </div>
 </div>
index f60c94c469813e5d31773729e37beb16876a56fe..317328185e3bdb6ac183470de4b5fa3f6ded934d 100644 (file)
@@ -5,25 +5,61 @@
 <table class="list">
  <thead>
   <tr>
-   <th colspan="<% if (nodes_interesting) { %>7<% } else { %>6<% } %>">Network</th>
-   <th colspan="<% if (vhosts_interesting) { %>5<% } else { %>4<% } %>">Overview</th>
+    <%= group_heading('connections', 'Overview', [vhosts_interesting, nodes_interesting, true]) %>
+    <%= group_heading('connections', 'Details', []) %>
+    <%= group_heading('connections', 'Network', []) %>
+    <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="connections">+/-</span></th>
   </tr>
   <tr>
-    <th><%= fmt_sort('Name',         'name') %></th>
-    <th><%= fmt_sort('Protocol',     'protocol') %></th>
-    <th><%= fmt_sort('Client',       'properties') %></th>
-<% if (nodes_interesting) { %>
-    <th><%= fmt_sort('Node',         'node') %></th>
-<% } %>
-    <th><%= fmt_sort('From client',  'recv_oct_details.rate') %></th>
-    <th><%= fmt_sort('To client',    'send_oct_details.rate') %></th>
-    <th><%= fmt_sort('Timeout',      'timeout') %></th>
-    <th><%= fmt_sort('Channels',     'channels') %></th>
 <% if (vhosts_interesting) { %>
     <th><%= fmt_sort('Virtual host', 'vhost') %></th>
 <% } %>
-    <th><%= fmt_sort('User name',    'user') %></th>
-    <th><%= fmt_sort('State',        'state') %></th>
+    <th><%= fmt_sort('Name',           'name') %></th>
+<% if (nodes_interesting) { %>
+    <th><%= fmt_sort('Node',           'node') %></th>
+<% } %>
+<% if (show_column('connections',      'user')) { %>
+    <th><%= fmt_sort('User name',      'user') %></th>
+<% } %>
+<% if (show_column('connections',      'state')) { %>
+    <th><%= fmt_sort('State',          'state') %></th>
+<% } %>
+<% if (show_column('connections',      'ssl')) { %>
+    <th><%= fmt_sort('SSL / TLS',      'ssl') %></th>
+<% } %>
+<% if (show_column('connections',      'ssl_info')) { %>
+    <th>SSL Details</th>
+<% } %>
+<% if (show_column('connections',      'protocol')) { %>
+    <th><%= fmt_sort('Protocol',       'protocol') %></th>
+<% } %>
+<% if (show_column('connections',      'channels')) { %>
+    <th><%= fmt_sort('Channels',       'channels') %></th>
+<% } %>
+<% if (show_column('connections',      'channel_max')) { %>
+    <th><%= fmt_sort('Channel max',    'channel_max') %></th>
+<% } %>
+<% if (show_column('connections',      'frame_max')) { %>
+    <th><%= fmt_sort('Frame max',      'frame_max') %></th>
+<% } %>
+<% if (show_column('connections',      'auth_mechanism')) { %>
+    <th><%= fmt_sort('Auth mechanism', 'auth_mechanism') %></th>
+<% } %>
+<% if (show_column('connections',      'client')) { %>
+    <th><%= fmt_sort('Client',         'properties') %></th>
+<% } %>
+<% if (show_column('connections',      'from_client')) { %>
+    <th><%= fmt_sort('From client',    'recv_oct_details.rate') %></th>
+<% } %>
+<% if (show_column('connections',      'to_client')) { %>
+    <th><%= fmt_sort('To client',      'send_oct_details.rate') %></th>
+<% } %>
+<% if (show_column('connections',      'heartbeat')) { %>
+    <th><%= fmt_sort('Heartbeat',      'timeout') %></th>
+<% } %>
+<% if (show_column('connections',      'connected_at')) { %>
+    <th><%= fmt_sort('Connected at',   'connected_at') %></th>
+<% } %>
   </tr>
  </thead>
  <tbody>
     var connection = connections[i];
 %>
   <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+    <td><%= fmt_string(connection.vhost) %></td>
+<% } %>
     <td><%= link_conn(connection.name) %></td>
-    <td>
-      <%= connection.protocol %>
-      <% if (connection.ssl) { %>
-        <sub>SSL</sub>
-      <% } %>
-    </td>
-    <td><%= fmt_client_name(connection.client_properties) %></td>
 <% if (nodes_interesting) { %>
     <td><%= fmt_node(connection.node) %></td>
 <% } %>
-    <td><%= fmt_rate_bytes(connection, 'recv_oct') %></td>
-    <td><%= fmt_rate_bytes(connection, 'send_oct') %></td>
-    <td><%= fmt_time(connection.timeout, 's') %></td>
-    <td><%= connection.channels %></td>
-<% if (vhosts_interesting) { %>
-    <td><%= fmt_string(connection.vhost) %></td>
+<% if (show_column('connections', 'user')) { %>
+    <td class="c"><%= fmt_string(connection.user) %></td>
 <% } %>
-    <td><%= fmt_string(connection.user) %></td>
+<% if (show_column('connections', 'state')) { %>
     <td><%= fmt_object_state(connection) %></td>
+<% } %>
+<% if (show_column('connections', 'ssl')) { %>
+    <td class="c"><%= fmt_boolean(connection.ssl, '') %></td>
+<% } %>
+<% if (show_column('connections', 'ssl_info')) { %>
+    <td>
+    <% if (connection.ssl) { %>
+      <%= connection.ssl_protocol %>
+      <sub>
+        <%= connection.ssl_key_exchange %>
+        <%= connection.ssl_cipher %>
+        <%= connection.ssl_hash %>
+      </sub>
+    <% } %>
+    </td>
+<% } %>
+<% if (show_column('connections', 'protocol')) { %>
+    <td class="c"><%= connection.protocol %></td>
+<% } %>
+<% if (show_column('connections', 'channels')) { %>
+    <td class="r"><%= fmt_string(connection.channels, '') %></td>
+<% } %>
+<% if (show_column('connections', 'channel_max')) { %>
+    <td class="r"><%= fmt_string(connection.channel_max, '') %></td>
+<% } %>
+<% if (show_column('connections', 'frame_max')) { %>
+    <td class="r"><%= fmt_string(connection.frame_max, '') %></td>
+<% } %>
+<% if (show_column('connections', 'auth_mechanism')) { %>
+    <td class="c"><%= fmt_string(connection.auth_mechanism, '') %></td>
+<% } %>
+<% if (show_column('connections', 'client')) { %>
+    <td><%= fmt_client_name(connection.client_properties) %></td>
+<% } %>
+<% if (show_column('connections', 'from_client')) { %>
+    <td><%= fmt_detail_rate_bytes(connection, 'recv_oct') %></td>
+<% } %>
+<% if (show_column('connections', 'to_client')) { %>
+    <td><%= fmt_detail_rate_bytes(connection, 'send_oct') %></td>
+<% } %>
+<% if (show_column('connections', 'heartbeat')) { %>
+    <td class="r"><%= fmt_time(connection.timeout, 's') %></td>
+<% } %>
+<% if (show_column('connections', 'connected_at')) { %>
+    <td><%= fmt_timestamp_mini(connection.connected_at) %></td>
+<% } %>
   </tr>
   <% } %>
  </tbody>
index d8d54f2b64d6f446c0e4cc7f1661595878d055a3..4eb2496554f29b4a7aa8ab021cdd34b9f2575928 100644 (file)
@@ -1,13 +1,11 @@
-<h1>Exchange: <b><%= fmt_exchange(exchange.name) %></b></h1>
+<h1>Exchange: <b><%= fmt_exchange(exchange.name) %></b><%= fmt_maybe_vhost(exchange.vhost) %></h1>
 
 <div class="section">
   <h2>Overview</h2>
-  <div class="hider">
-<% if (statistics_level == 'fine') { %>
+  <div class="hider updatable">
+<% if (rates_mode != 'none') { %>
     <%= message_rates('msg-rates-x', exchange.message_stats) %>
 <% } %>
-
-    <div class="updatable">
     <h3>Details</h3>
     <table class="facts">
       <tr>
         <td class="l"><%= fmt_exchange_type(exchange.type) %></td>
       </tr>
       <tr>
-        <th>Parameters</th>
-        <td><%= fmt_parameters(exchange) %></td>
+        <th>Features</th>
+        <td><%= fmt_features(exchange) %></td>
       </tr>
       <tr>
         <th>Policy</th>
         <td><%= fmt_string(exchange.policy, '') %></td>
       </tr>
-<% if (vhosts_interesting) { %>
-      <tr>
-        <th>Virtual host</th>
-        <td><%= fmt_string(exchange.vhost) %></td>
-      </tr>
-<% } %>
     </table>
-    </div>
   </div>
 </div>
 
-<% if (statistics_level == 'fine') { %>
+<% if (rates_mode == 'detailed') { %>
 <div class="section-hidden">
 <h2>Message rates breakdown</h2>
 <div class="hider updatable">
index 7154289edc8da3e376f8e8554b09b202543db160..58589d5d6fa99272e9c717da3467a035a037998f 100644 (file)
    <th><%= fmt_sort('Virtual host', 'vhost') %></th>
 <% } %>
    <th><%= fmt_sort('Name',         'name') %></th>
+<% if (show_column('exchanges', 'type')) { %>
    <th><%= fmt_sort('Type',         'type') %></th>
-   <th><%= fmt_sort('Policy',       'policy') %></th>
-   <th>Parameters</th>
-<% if (statistics_level == 'fine') { %>
+<% } %>
+<% if (show_column('exchanges', 'features')) { %>
+   <th>Features</th>
+<% } %>
+<% if (show_column('exchanges', 'features_no_policy')) { %>
+   <th>Features</th>
+<% } %>
+<% if (show_column('exchanges', 'policy')) { %>
+    <th><%= fmt_sort('Policy','policy') %></th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('exchanges', 'rate-in')) { %>
    <th><%= fmt_sort('Message rate in',   'message_stats.publish_in_details.rate') %></th>
+<% } %>
+<% if (show_column('exchanges', 'rate-out')) { %>
    <th><%= fmt_sort('Message rate out',  'message_stats.publish_out_details.rate') %></th>
 <% } %>
+<% } %>
+    <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="exchanges">+/-</span></th>
   </tr>
  </thead>
  <tbody>
 <% } %>
    <td><%= link_exchange(exchange.vhost, exchange.name, exchange.arguments) %></td>
    <td class="c"><%= fmt_exchange_type(exchange.type) %></td>
-   <td class="c"><%= fmt_string(exchange.policy, '') %></td>
-   <td class="c"><%= fmt_parameters_short(exchange) %></td>
-<% if (statistics_level == 'fine') { %>
-   <td class="r"><%= fmt_rate(exchange.message_stats, 'publish_in') %></td>
-   <td class="r"><%= fmt_rate(exchange.message_stats, 'publish_out') %></td>
+<% if (show_column('exchanges', 'features')) { %>
+   <td class="c">
+     <%= fmt_features_short(exchange) %>
+     <%= fmt_policy_short(exchange) %>
+   </td>
+<% } %>
+<% if (show_column('exchanges', 'features_no_policy')) { %>
+   <td class="c">
+     <%= fmt_features_short(exchange) %>
+   </td>
+<% } %>
+<% if (show_column('exchanges', 'policy')) { %>
+   <td class="c">
+     <%= fmt_string(exchange.policy) %>
+   </td>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('exchanges', 'rate-in')) { %>
+   <td class="r"><%= fmt_detail_rate(exchange.message_stats, 'publish_in') %></td>
+<% } %>
+<% if (show_column('exchanges', 'rate-out')) { %>
+   <td class="r"><%= fmt_detail_rate(exchange.message_stats, 'publish_out') %></td>
+<% } %>
 <% } %>
   </tr>
   <% } %>
             </select>
           </td>
         </tr>
-        <tr>
-          <th><label>Alternate exchange: <span class="help" id="exchange-alternate"></span></label></th>
-          <td><input type="text" name="alternate-exchange"/></td>
-        </tr>
         <tr>
           <th><label>Arguments:</label></th>
           <td>
             <div class="multifield" id="arguments"></div>
+            <table class="argument-links">
+              <tr>
+                <td>Add</td>
+                <td>
+                  <span class="argument-link" field="arguments" key="alternate-exchange" type="string">Alternate exchange <span class="help" id="exchange-alternate"></span>
+                </td>
+              </tr>
+            </table>
           </td>
         </tr>
       </table>
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/memory-bar.ejs b/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/memory-bar.ejs
new file mode 100644 (file)
index 0000000..a6d6ab6
--- /dev/null
@@ -0,0 +1,24 @@
+<div class="memory-bar">
+<%
+  var width = 800;
+
+  var pseudo_total = 0;
+  for (var section in sections) {
+    pseudo_total += memory[section];
+  }
+
+  total_out[0] = pseudo_total;
+
+  for (var section in sections) {
+    if (memory[section] > 0) {
+    var section_width = Math.round(width * memory[section] / pseudo_total);
+%>
+  <div class="memory-section memory_<%= sections[section][0] %>"
+       style="width: <%= section_width %>px;"
+       title="<%= sections[section][1] %> <%= fmt_bytes(memory[section]) %>">
+  </div>
+<%
+     }
+   }
+%>
+</div>
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/memory-table.ejs b/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/memory-table.ejs
new file mode 100644 (file)
index 0000000..7f90ab8
--- /dev/null
@@ -0,0 +1,28 @@
+<%
+  for (var i in key) {
+%>
+<table class="facts">
+<%
+  for (var j in key[i]) {
+   var group = key[i][j];
+%>
+  <tr>
+    <th><div class="colour-key memory_<%= group.colour %>"></div><%= group.name %></th>
+    <td>
+      <table class="mini">
+<%
+  for (var k in group.keys) {
+    var name  = group.keys[k][0];
+    var label = group.keys[k][1];
+%>
+        <tr>
+          <td class="r"><%= fmt_bytes(memory[name]) %></td>
+          <td><%= label %></td>
+        </tr>
+<% } %>
+      </table>
+    </td>
+  </tr>
+<% } %>
+</table>
+<% } %>
index 90b2df4493fee3bbc25f58826268e9ccf3994699..1fb6aff2970b4b29a9615af4eeaaa5be0498f4ff 100644 (file)
@@ -1,90 +1,59 @@
 <%
-  var width = 800;
   if (memory == "not_available") {
 %>
 <p class="warning">
   Memory statistics not available.
 </p>
 <% } else { %>
-<div class="memory-bar">
 <%
-  var sections = {'connection_procs' : 'Connections',
-                  'queue_procs'      : 'Queues',
-                  'plugins'          : 'Plugins',
-                  'other_proc'       : 'Other process memory',
-                  'mnesia'           : 'Mnesia',
-                  'msg_index'        : 'Message store index',
-                  'mgmt_db'          : 'Management database',
-                  'other_ets'        : 'Other ETS tables',
-                  'binary'           : 'Binaries',
-                  'code'             : 'Code',
-                  'atom'             : 'Atoms',
-                  'other_system'     : 'Other system'};
-  for (var section in sections) {
-    var section_width = Math.round(width * memory[section] / memory.total);
+  var sections = {'queue_procs'         : ['queue',  'Queues'],
+                  'queue_slave_procs'   : ['queue',  'Queues (slaves)'],
+                  'binary'              : ['binary', 'Binaries'],
+                  'connection_readers'  : ['conn',   'Connection readers'],
+                  'connection_writers'  : ['conn',   'Connection writers'],
+                  'connection_channels' : ['conn',   'Connection channels'],
+                  'connection_other'    : ['conn',   'Connections (other)'],
+                  'mnesia'              : ['table',  'Mnesia'],
+                  'msg_index'           : ['table',  'Message store index'],
+                  'mgmt_db'             : ['table',  'Management database'],
+                  'other_ets'           : ['table',  'Other ETS tables'],
+                  'plugins'             : ['proc',   'Plugins'],
+                  'other_proc'          : ['proc',   'Other process memory'],
+                  'code'                : ['system', 'Code'],
+                  'atom'                : ['system', 'Atoms'],
+                  'other_system'        : ['system', 'Other system']};
 %>
-  <div class="memory-section memory_<%= section %>"
-       style="width: <%= section_width %>px;"
-       title="<%= sections[section] %> <%= fmt_bytes(memory[section]) %>">
-  </div>
-<% } %>
-</div>
+<%= format('memory-bar', {sections: sections, memory: memory, total_out: []}) %>
 <span class="clear">&nbsp;</span>
 <div class="box">
-<table class="facts">
-  <tr>
-    <th>Connections</th>
-    <td><%= fmt_memory(memory, 'connection_procs') %></td>
-  </tr>
-  <tr>
-    <th>Queues</th>
-    <td><%= fmt_memory(memory, 'queue_procs') %></td>
-  </tr>
-  <tr>
-    <th>Plugins</th>
-    <td><%= fmt_memory(memory, 'plugins') %></td>
-  </tr>
-  <tr>
-    <th>Other process memory</th>
-    <td><%= fmt_memory(memory, 'other_proc') %></td>
-  </tr>
-</table>
-<table class="facts">
-  <tr>
-    <th>Mnesia</th>
-    <td><%= fmt_memory(memory, 'mnesia') %></td>
-  </tr>
-  <tr>
-    <th>Message store index</th>
-    <td><%= fmt_memory(memory, 'msg_index') %></td>
-  </tr>
-  <tr>
-    <th>Management database</th>
-    <td><%= fmt_memory(memory, 'mgmt_db') %></td>
-  </tr>
-  <tr>
-    <th>Other ETS tables</th>
-    <td><%= fmt_memory(memory, 'other_ets') %></td>
-  </tr>
-</table>
-<table class="facts">
-  <tr>
-    <th>Binaries</th>
-    <td><%= fmt_memory(memory, 'binary') %></td>
-  </tr>
-  <tr>
-    <th>Code</th>
-    <td><%= fmt_memory(memory, 'code') %></td>
-  </tr>
-  <tr>
-    <th>Atoms</th>
-    <td><%= fmt_memory(memory, 'atom') %></td>
-  </tr>
-  <tr>
-    <th>Other system</th>
-    <td><%= fmt_memory(memory, 'other_system') %></td>
-  </tr>
-</table>
+<%
+var key = [[{name: 'Queues', colour: 'queue',
+             keys: [['queue_procs',         'queues'],
+                    ['queue_slave_procs',   'slaves']]},
+            {name: 'Binaries', colour: 'binary',
+             keys: [['binary',              '']]}],
+
+           [{name: 'Connections', colour: 'conn',
+             keys: [['connection_readers',  'readers'],
+                    ['connection_writers',  'writers'],
+                    ['connection_channels', 'channels'],
+                    ['connection_other',    'other']]}],
+
+           [{name: 'Tables', colour: 'table',
+             keys: [['mnesia',              'mnesia'],
+                    ['msg_index',           'message store index'],
+                    ['mgmt_db',             'management database'],
+                    ['other_ets',           'other']]}],
+
+           [{name: 'Processes', colour: 'proc',
+             keys: [['plugins',             'plugins'],
+                    ['other_proc',          'other']]},
+            {name: 'System', colour: 'system',
+             keys: [['code',                'code'],
+                    ['atom',                'atoms'],
+                    ['other_system',        'other']]}]];
+%>
+<%= format('memory-table', {key: key, memory: memory}) %>
 </div>
 
 <div class="memory-info">
index 448adf792bcd1294d28e9f8e12030d2a30821989..ee9e6f6500b32fd226f386397832842481bcc357 100644 (file)
@@ -1,8 +1,5 @@
 <h3>Deliveries</h3>
 <% if (object && object.length > 0) { %>
-<%
-     var col_redeliver = !is_col_empty(object, 'redeliver', function(o) {return o.stats;});
-%>
 <table class="list">
   <tr>
 <% if (mode == 'queue') { %>
@@ -10,12 +7,7 @@
 <% } else { %>
     <th>Queue</th>
 <% } %>
-    <th>
-      deliver / get
-      <% if (col_redeliver) { %>
-        <sub>of which redelivered</sub>
-      <% } %>
-    </th>
+    <th>deliver / get</th>
     <th>ack</th>
   </tr>
 <%
@@ -28,8 +20,8 @@
 <% } else { %>
        <td><%= link_queue(del.queue.vhost, del.queue.name) %></td>
 <% } %>
-       <td class="r"><%= fmt_deliver_rate(del.stats, col_redeliver) %></td>
-       <td class="r"><%= fmt_rate(del.stats, 'ack') %></td>
+       <td class="r"><%= fmt_detail_rate(del.stats, 'deliver_get') %></td>
+       <td class="r"><%= fmt_detail_rate(del.stats, 'ack') %></td>
      </tr>
 <% } %>
 </table>
index 84f9343582240d12d964ffb078e807fbf0d940d4..4ea959ac52a71708b338ee250e6f6effaebb4b1e 100644 (file)
@@ -1,7 +1,6 @@
 <h3><%= label %></h3>
 <% if (object && object.length > 0) { %>
 <%
-     var col_return_unroutable = !is_col_empty(object, 'return_unroutable', function(o) {return o.stats;});
      var col_confirm = mode != 'exchange-outgoing';
 %>
 <table class="list">
@@ -18,9 +17,6 @@
     <th>publish</th>
 <% if (col_confirm) { %>
     <th>confirm</th>
-<% } %>
-<% if (col_return_unroutable) { %>
-    <th>return (mandatory)</th>
 <% } %>
   </tr>
 <%
 <% } else { %>
       <td><%= link_exchange(pub.exchange.vhost, pub.exchange.name) %></td>
 <% } %>
-      <td class="r"><%= fmt_rate(pub.stats, 'publish') %></td>
+      <td class="r"><%= fmt_detail_rate(pub.stats, 'publish') %></td>
 <% if (col_confirm) { %>
-      <td class="r"><%= fmt_rate(pub.stats, 'confirm') %></td>
-<% } %>
-<% if (col_return_unroutable) { %>
-      <td class="r"><%= fmt_rate(pub.stats, 'return_unroutable') %></td>
+      <td class="r"><%= fmt_detail_rate(pub.stats, 'confirm') %></td>
 <% } %>
     </tr>
 <% } %>
index 00d7489fbbf3c0e35619da1a5a5f05f1d68bc3c0..adde0cfca32f0e0778a4e7eb07bf63e4a5a3979f 100644 (file)
@@ -1,20 +1,80 @@
 <h1>Node <b><%= node.name %></b></h1>
+<div class="updatable">
 
-<div class="section">
-<h2>Overview</h2>
-<div class="hider updatable">
 <% if (!node.running) { %>
 <p class="warning">Node not running</p>
 <% } else if (node.os_pid == undefined) { %>
 <p class="warning">Node statistics not available</p>
 <% } else { %>
+
+<div class="section">
+<h2>Overview</h2>
+<div class="hider">
+  <div class="box">
+  <table class="facts facts-l">
+    <tr>
+      <th>Uptime</th>
+      <td><%= fmt_uptime(node.uptime) %></td>
+    </tr>
+<% if (rabbit_versions_interesting) { %>
+    <tr>
+      <th>RabbitMQ Version</th>
+      <td><%= fmt_rabbit_version(node.applications) %></td>
+    </tr>
+<% } %>
+    <tr>
+      <th>Type</th>
+      <td>
+       <% if (node.type == 'disc') { %>
+         <acronym title="Broker definitions are held on disc.">Disc</acronym>
+       <% } else { %>
+         <acronym title="Broker definitions are held in RAM. Messages will still be written to disc if necessary.">RAM</acronym>
+       <% } %>
+      </td>
+    </tr>
+  </table>
+
+  <%= format('paths', {node: node}) %>
+  </div>
+
+  <h3>Plugins <span class="help" id="plugins"></span></h3>
+  <table class="list">
+    <tr>
+      <th>Name</th>
+      <th>Version</th>
+      <th>Description</th>
+    </tr>
+    <%
+       var plugins = get_plugins_list(node);
+       for (var j = 0; j < plugins.length; j++) {
+         var application = plugins[j];
+    %>
+           <tr<%= alt_rows(j)%>>
+             <td><%= application.name %></td>
+             <td><%= application.version %></td>
+             <td><%= application.description %></td>
+           </tr>
+    <% } %>
+  </table>
+</div>
+</div>
+
+<div class="section">
+<h2>Process statistics</h2>
+<div class="hider">
+  <%= node_stats_prefs() %>
   <table class="facts">
     <tr>
       <th>
         File descriptors <span class="help" id="file-descriptors"></span>
       </th>
       <td>
-<%= fmt_resource_bar_count(fmt_fd_used(node.fd_used, node.fd_total), node.fd_total, FD_THRESHOLDS) %>
+<% if (node.fd_used != 'install_handle_from_sysinternals') { %>
+        <%= node_stat_count('fd_used', 'fd_total', node, FD_THRESHOLDS) %>
+<% } else { %>
+        <p class="c">handle.exe missing <span class="help" id="handle-exe"></span><sub><%= node.fd_total %> available</sub></p>
+
+<% } %>
       </td>
     </tr>
     <tr>
@@ -22,7 +82,7 @@
         Socket descriptors <span class="help" id="socket-descriptors"></span>
       </th>
       <td>
-<%= fmt_resource_bar_count(node.sockets_used, node.sockets_total, FD_THRESHOLDS) %>
+        <%= node_stat_count('sockets_used', 'sockets_total', node, FD_THRESHOLDS) %>
       </td>
     </tr>
     <tr>
         Erlang processes
       </th>
      <td>
-<%= fmt_resource_bar_count(node.proc_used, node.proc_total, PROCESS_THRESHOLDS) %>
+        <%= node_stat_count('proc_used', 'proc_total', node, PROCESS_THRESHOLDS) %>
      </td>
     </tr>
-  </table>
-  <table class="facts">
     <tr>
       <th>
         Memory
       </th>
       <td>
 <% if (node.mem_limit != 'memory_monitoring_disabled') { %>
-   <%= fmt_resource_bar(fmt_bytes(node.mem_used),
-                        fmt_bytes(node.mem_limit) + ' high watermark',
-                        node.mem_used / node.mem_limit,
-                        node.mem_alarm ? 'red' : 'green',
-                        node.mem_alarm ? 'memory-alarm' : null) %>
+   <%= node_stat('mem_used', 'Used', 'mem_limit', 'high watermark', node,
+                 fmt_bytes, fmt_bytes_axis,
+                 node.mem_alarm ? 'red' : 'green',
+                 node.mem_alarm ? 'memory-alarm' : null) %>
 <% } else { %>
    <%= fmt_bytes(node.mem_used) %>
 <% } %>
       </th>
       <td>
 <% if (node.disk_free_limit != 'disk_free_monitoring_disabled') { %>
-   <%= fmt_resource_bar(fmt_bytes(node.disk_free),
-                        fmt_bytes(node.disk_free_limit) + ' low watermark',
-                        node.disk_free_limit / node.disk_free,
-                        node.disk_free_alarm ? 'red' : 'green',
-                        node.disk_free_alarm ? 'disk_free-alarm' : null) %>
+   <%= node_stat('disk_free', 'Free', 'disk_free_limit', 'low watermark', node,
+                 fmt_bytes, fmt_bytes_axis,
+                 node.disk_free_alarm ? 'red' : 'green',
+                 node.disk_free_alarm ? 'disk_free-alarm' : null,
+                 true) %>
 <% } else { %>
          (not available)
 <% } %>
       </td>
     </tr>
   </table>
+</div>
+</div>
 
-  <table class="facts">
-    <tr>
-      <th>Uptime</th>
-      <td><%= fmt_uptime(node.uptime) %></td>
-    </tr>
-<% if (rabbit_versions_interesting) { %>
-    <tr>
-      <th>RabbitMQ Version</th>
-      <td><%= fmt_rabbit_version(node.applications) %></td>
-    </tr>
+<div class="section-hidden">
+<h2>Persistence statistics</h2>
+<div class="hider">
+  <%= rates_chart_or_text('mnesia-stats-count', node,
+      [['RAM only', 'mnesia_ram_tx_count'],
+       ['Disk', 'mnesia_disk_tx_count']],
+      fmt_rate, fmt_rate_axis, true, 'Mnesia transactions', 'mnesia-transactions') %>
+
+  <%= rates_chart_or_text('persister-msg-stats-count', node,
+      [['QI Journal', 'queue_index_journal_write_count'],
+       ['Store Read', 'msg_store_read_count'],
+       ['Store Write', 'msg_store_write_count']],
+      fmt_rate, fmt_rate_axis, true, 'Persistence operations (messages)', 'persister-operations-msg') %>
+
+  <%= rates_chart_or_text('persister-bulk-stats-count', node,
+      [['QI Read', 'queue_index_read_count'],
+       ['QI Write', 'queue_index_write_count']],
+      fmt_rate, fmt_rate_axis, true, 'Persistence operations (bulk)', 'persister-operations-bulk') %>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>I/O statistics</h2>
+<div class="hider">
+  <%= rates_chart_or_text('persister-io-stats-count', node,
+      [['Read', 'io_read_count'],
+       ['Write', 'io_write_count'],
+       ['Seek', 'io_seek_count'],
+       ['Sync', 'io_sync_count'],
+       ['Reopen', 'io_reopen_count']],
+      fmt_rate, fmt_rate_axis, true, 'I/O operations', 'io-operations') %>
+
+  <%= rates_chart_or_text('persister-io-stats-bytes', node,
+      [['Read', 'io_read_bytes'],
+       ['Write', 'io_write_bytes']],
+      fmt_rate_bytes, fmt_rate_bytes_axis, true, 'I/O data rates') %>
+
+  <%= rates_chart_or_text('persister-io-stats-time', node,
+      [['Read', 'io_read_avg_time'],
+       ['Write', 'io_write_avg_time'],
+       ['Seek', 'io_seek_avg_time'],
+       ['Sync', 'io_sync_avg_time']],
+      fmt_ms, fmt_ms, false, 'I/O average time per operation') %>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>Cluster links</h2>
+<div class="hider">
+<% if (node.cluster_links.length > 0) { %>
+<table class="list">
+  <tr>
+    <th>Remote node</th>
+    <th>Local address</th>
+    <th>Local port</th>
+    <th>Remote address</th>
+    <th>Remote port</th>
+    <th class="plain">
+      <%= chart_h3('cluster-link-data-rates', 'Data rates') %>
+    </th>
+  </tr>
+  <%
+   for (var i = 0; i < node.cluster_links.length; i++) {
+     var link = node.cluster_links[i];
+  %>
+   <tr<%= alt_rows(i)%>>
+     <td><%= link_node(link.name) %></td>
+     <td><%= fmt_string(link.sock_addr) %></td>
+     <td><%= fmt_string(link.sock_port) %></td>
+     <td><%= fmt_string(link.peer_addr) %></td>
+     <td><%= fmt_string(link.peer_port) %></td>
+     <td class="plain">
+       <%= rates_chart_or_text_no_heading(
+           'cluster-link-data-rates', 'cluster-link-data-rates' + link.name,
+           link.stats,
+           [['Recv', 'recv_bytes'],
+            ['Send', 'send_bytes']],
+           fmt_rate_bytes, fmt_rate_bytes_axis, true) %>
+     </td>
+   </tr>
 <% } %>
-    <tr>
-      <th>Type</th>
-      <td>
-       <% if (node.type == 'disc') { %>
-         <acronym title="Broker definitions are held on disc.">Disc</acronym>
-       <% } else { %>
-         <acronym title="Broker definitions are held in RAM. Messages will still be written to disc if necessary.">RAM</acronym>
-       <% } %>
-      </td>
-    </tr>
-  </table>
+</table>
+<% } else { %>
+  <p>... no cluster links ...</p>
 <% } %>
 </div>
 </div>
 
+<% } %>
+
+</div>
+
+<!--
+  The next two need to be non-updatable or we will wipe the memory details
+  as soon as we have drawn it.
+ -->
+
+<% if (node.running && node.os_pid != undefined) { %>
+
 <div class="section">
 <h2>Memory details</h2>
 <div class="hider">
 </div>
 
 <div class="section-hidden">
-<h2>Applications</h2>
-<div class="hider updatable">
-<% if (!node.running) { %>
-<p class="warning">Node not running</p>
-<% } else if (node.os_pid == undefined) { %>
-<p class="warning">Node statistics not available</p>
-<% } else { %>
-<table class="list">
-    <tr>
-      <th>Name</th>
-      <th>Version</th>
-    </tr>
-    <%
-      for (var j = 0; j < node.applications.length; j++) {
-        var application = node.applications[j];
-    %>
-       <tr<%= alt_rows(j)%>>
-         <td>
-           <%= application.name %>
-           <sub><%= application.description %></sub>
-         </td>
-         <td><%= application.version %></td>
-       </tr>
-    <% } %>
-</table>
-<% } %>
+<h2>Binary references</h2>
+<div class="hider">
+  <p>
+    <b>Warning:</b> Calculating binary memory use can be expensive if
+    there are many small binaries in the system.
+  </p>
+  <div id="binary-details"></div>
+  <button class="update-manual memory-button" for="binary-details" query="binary">Update</button>
 </div>
 </div>
 
-<div class="section-hidden">
-<h2>Registry</h2>
-<div class="hider updatable">
-<% if (!node.running) { %>
-<p class="warning">Node not running</p>
-<% } else if (node.os_pid == undefined) { %>
-<p class="warning">Node statistics not available</p>
-<% } else { %>
-<h3>Exchange types</h3>
-<%= format('registry', {'list': node.exchange_types, 'node': node, 'show_enabled': false} ) %>
-<h3>Authentication mechanisms</h3>
-<%= format('registry', {'list': node.auth_mechanisms, 'node': node, 'show_enabled': true} ) %>
 <% } %>
-</div>
-</div>
+
+<div class="updatable">
+<% if (node.running && node.os_pid != undefined) { %>
 
 <div class="section-hidden">
 <h2>Advanced</h2>
-<div class="hider updatable">
-<% if (!node.running) { %>
-<p class="warning">Node not running</p>
-<% } else if (node.os_pid == undefined) { %>
-<p class="warning">Node statistics not available</p>
-<% } else { %>
+<div class="hider">
   <div class="box">
   <h3>VM</h3>
   <table class="facts">
       <td><%= node.os_pid %></td>
     </tr>
     <tr>
-      <th>Statistics</th>
-      <td><%= node.statistics_level %></td>
+      <th>Rates mode</th>
+      <td><%= node.rates_mode %></td>
+    </tr>
+    <tr>
+      <th>Net ticktime</th>
+      <td><%= node.net_ticktime %>s</td>
     </tr>
   </table>
 
       <td><%= node.processors %></td>
     </tr>
   </table>
-<% } %>
+  </div>
+
+<h3>All applications</h3>
+<table class="list">
+    <tr>
+      <th>Name</th>
+      <th>Version</th>
+      <th>Description</th>
+    </tr>
+    <%
+      for (var j = 0; j < node.applications.length; j++) {
+        var application = node.applications[j];
+    %>
+       <tr<%= alt_rows(j)%>>
+         <td><%= application.name %></td>
+         <td><%= application.version %></td>
+         <td><%= application.description %></td>
+       </tr>
+    <% } %>
+</table>
+
+<h3>Exchange types</h3>
+<%= format('registry', {'list': node.exchange_types, 'node': node, 'show_enabled': false} ) %>
+<h3>Authentication mechanisms</h3>
+<%= format('registry', {'list': node.auth_mechanisms, 'node': node, 'show_enabled': true} ) %>
+
+</div>
 </div>
+
+<% } %>
+
 </div>
index 7a886eaa2817b72b828b76990d4b0d9eb432f86b..86ff6ac17f395236049da17a45b56fb5d3218986 100644 (file)
@@ -2,19 +2,36 @@
 <% if (user_monitor) { %>
 <%= format('partition', {'nodes': nodes}) %>
 <% } %>
+
+<div class="updatable">
+<% if (overview.statistics_db_event_queue > 1000) { %>
+<p class="warning">
+  The management statistics database currently has a queue
+  of <b><%= overview.statistics_db_event_queue %></b> events to
+  process. If this number keeps increasing, so will the memory used by
+  the management plugin.
+
+  <% if (overview.rates_mode != 'none') { %>
+  You may find it useful to set the <code>rates_mode</code> config item
+  to <code>none</code>.
+  <% } %>
+</p>
+<% } %>
+</div>
+
 <div class="section">
 <h2>Totals</h2>
-<div class="hider">
+<div class="hider updatable">
 <% if (overview.statistics_db_node != 'not_running') { %>
   <%= queue_lengths('lengths-over', overview.queue_totals) %>
-<% if (statistics_level == 'fine') { %>
+<% if (rates_mode != 'none') { %>
   <%= message_rates('msg-rates-over', overview.message_stats) %>
 <% } %>
 <% } else { %>
     Totals not available
 <% } %>
 
-<div class="updatable">
+<% if (overview.object_totals) { %>
   <h3>Global counts <span class="help" id="resource-counts"></span></h3>
 
   <div class="box">
     </div>
 <% } %>
   </div>
- </div>
+<% } %>
 
 </div>
 </div>
 
 <% if (user_monitor) { %>
 <div class="section">
+<% if (nodes.length == 1) { %>
+<h2>Node</h2>
+<% } else { %>
 <h2>Nodes</h2>
+<% } %>
+
 <div class="hider updatable">
+<% if (nodes.length == 1) { %>
+    <p>Node: <%= nodes[0].name %> <a href="#/nodes/<%= esc(nodes[0].name) %>">(More about this node)</a></p>
+<% } %>
+
 <table class="list">
   <tr>
+<% if (nodes.length > 1) { %>
     <th>Name</th>
-    <th>
-      File descriptors <span class="help" id="file-descriptors"></span>
-    </th>
-    <th>
-      Socket descriptors <span class="help" id="socket-descriptors"></span>
-    </th>
-    <th>
-      Erlang processes
-    </th>
-    <th>
-      Memory
-    </th>
-    <th>
-      Disk space
-    </th>
+<% } %>
+
+  <% if (show_column('overview', 'file_descriptors')) { %>
+    <th>File descriptors <span class="help" id="file-descriptors"></span></th>
+  <% } %>
+  <% if (show_column('overview', 'socket_descriptors')) { %>
+    <th>Socket descriptors <span class="help" id="socket-descriptors"></span></th>
+  <% } %>
+  <% if (show_column('overview', 'erlang_processes')) { %>
+    <th>Erlang processes</th>
+  <% } %>
+  <% if (show_column('overview', 'memory')) { %>
+    <th>Memory</th>
+  <% } %>
+  <% if (show_column('overview', 'disk_space')) { %>
+    <th>Disk space</th>
+  <% } %>
+  <% if (show_column('overview', 'uptime')) { %>
     <th>Uptime</th>
-    <th>Type</th>
+  <% } %>
+  <% if (show_column('overview', 'rates_mode')) { %>
+    <th>Rates mode</th>
+  <% } %>
+  <% if (show_column('overview', 'info')) { %>
+    <th>Info</th>
+  <% } %>
+    <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="overview">+/-</span></th>
   </tr>
 <%
    for (var i = 0; i < nodes.length; i++) {
      var node = nodes[i];
+     var colspan = group_count('overview', 'Statistics', []) +
+                   group_count('overview', 'General', []);
 %>
    <tr<%= alt_rows(i)%>>
+<% if (nodes.length > 1) { %>
      <td>
        <%= link_node(node.name) %>
        <% if (rabbit_versions_interesting) { %>
          <sub>RabbitMQ <%= fmt_rabbit_version(node.applications) %></sub>
        <% } %>
      </td>
+<% } %>
 <% if (!node.running) { %>
-     <td colspan="6">
+     <td colspan="<%= colspan %>">
        <div class="status-red">
          Node not running
        </div>
      </td>
 <% } else if (node.os_pid == undefined) { %>
-     <td colspan="6">
+     <td colspan="<%= colspan %>">
        <div class="status-yellow">
          <acronym title="The rabbitmq_management_agent plugin should be enabled on this node. If it is not, various statistics will be inaccurate.">
            Node statistics not available</acronym>
        </div>
      </td>
 <% } else { %>
+  <% if (show_column('overview', 'file_descriptors')) { %>
      <td>
-<%= fmt_resource_bar_count(fmt_fd_used(node.fd_used, node.fd_total), node.fd_total, FD_THRESHOLDS) %>
+    <% if (node.fd_used != 'install_handle_from_sysinternals') { %>
+        <%= node_stat_count_bar('fd_used', 'fd_total', node, FD_THRESHOLDS) %>
+    <% } else { %>
+        <p class="c">handle.exe missing <span class="help" id="handle-exe"></span><sub><%= node.fd_total %> available</sub></p>
+
+    <% } %>
      </td>
+  <% } %>
+  <% if (show_column('overview', 'socket_descriptors')) { %>
      <td>
-<%= fmt_resource_bar_count(node.sockets_used, node.sockets_total, FD_THRESHOLDS) %>
+        <%= node_stat_count_bar('sockets_used', 'sockets_total', node, FD_THRESHOLDS) %>
      </td>
+  <% } %>
+  <% if (show_column('overview', 'erlang_processes')) { %>
      <td>
-<%= fmt_resource_bar_count(node.proc_used, node.proc_total, PROCESS_THRESHOLDS) %>
+
+        <%= node_stat_count_bar('proc_used', 'proc_total', node, PROCESS_THRESHOLDS) %>
      </td>
+  <% } %>
+  <% if (show_column('overview', 'memory')) { %>
      <td>
-<% if (node.mem_limit != 'memory_monitoring_disabled') { %>
-   <%= fmt_resource_bar(fmt_bytes(node.mem_used),
-                        fmt_bytes(node.mem_limit) + ' high watermark',
-                        node.mem_used / node.mem_limit,
-                        node.mem_alarm ? 'red' : 'green',
-                        node.mem_alarm ? 'memory-alarm' : null) %>
-<% } else { %>
+
+    <% if (node.mem_limit != 'memory_monitoring_disabled') { %>
+        <%= node_stat_bar('mem_used', 'mem_limit', 'high watermark', node, fmt_bytes_axis,
+                          node.mem_alarm ? 'red' : 'green',
+                          node.mem_alarm ? 'memory-alarm' : null) %>
+    <% } else { %>
        <%= fmt_bytes(node.mem_used) %>
-<% } %>
+    <% } %>
      </td>
+  <% } %>
+  <% if (show_column('overview', 'disk_space')) { %>
      <td>
-<% if (node.disk_free_limit != 'disk_free_monitoring_disabled') { %>
-   <%= fmt_resource_bar(fmt_bytes(node.disk_free),
-                        fmt_bytes(node.disk_free_limit) + ' low watermark',
-                        node.disk_free_limit / node.disk_free,
-                        node.disk_free_alarm ? 'red' : 'green',
-                        node.disk_free_alarm ? 'disk-free-alarm' : null) %>
-<% } else { %>
+
+    <% if (node.disk_free_limit != 'disk_free_monitoring_disabled') { %>
+        <%= node_stat_bar('disk_free', 'disk_free_limit', 'low watermark', node, fmt_bytes_axis,
+                          node.disk_free_alarm ? 'red' : 'green',
+                          node.disk_free_alarm ? 'disk_free-alarm' : null, true) %>
+    <% } else { %>
          (not available)
-<% } %>
+    <% } %>
      </td>
-     <td class="r">
-       <%= fmt_uptime(node.uptime) %>
-     </td>
-<% } %>
+  <% } %>
+  <% if (show_column('overview', 'uptime')) { %>
+     <td class="r"><%= fmt_uptime(node.uptime) %></td>
+  <% } %>
+  <% if (show_column('overview', 'rates_mode')) { %>
+     <td class="c"><%= fmt_string(node.rates_mode) %></td>
+  <% } %>
+  <% if (show_column('overview', 'info')) { %>
      <td class="c">
        <% if (node.type == 'disc') { %>
          <acronym title="Broker definitions are held on disc.">Disc</acronym>
        <% } else { %>
          <acronym title="Broker definitions are held in RAM. Messages will still be written to disc if necessary.">RAM</acronym>
        <% } %>
+       <%= fmt_plugins_small(node) %>
        <% if (overview.statistics_db_node == node.name) { %>
          <acronym title="This node contains the management statistics database">Stats</acronym>
        <% } %>
-       <% if (overview.node == node.name) { %>
-         <acronym title="You are accessing the management UI from this node.">*</acronym>
-       <% } %>
      </td>
+  <% } %>
+<% } %>
    </tr>
 <% } %>
 </table>
 <% if (overview.statistics_db_node == 'not_running') { %>
   <p class="status-error">Statistics database could not be contacted. Message rates and queue lengths will not be shown.</p>
 <% } %>
+
+<% if (nodes.length == 1 && nodes[0].os_pid != undefined) { %>
+    <h3>Paths</h3>
+    <%= format('paths', {node: nodes[0]}) %>
+<% } %>
+
 </div>
 </div>
 
-<div class="section">
+<div class="section-hidden">
 <h2>Ports and contexts</h2>
 <div class="hider updatable">
 <h3>Listening ports</h3>
 </div>
 </div>
 
-<% if (overview.statistics_level != 'fine') { %>
+<% if (overview.rates_mode == 'none') { %>
 <div class="section-hidden">
-<h2>Message Rates Disabled</h2>
+<h2>Message rates disabled</h2>
 <div class="hider">
 <p>
-  The statistics level in this RabbitMQ server is currently set to
-  <code><%= overview.statistics_level %></code>. Message rates are therefore
-  disabled.
+  Message rates are currently disabled.
 </p>
 <p>
-  To re-enable message rates, edit your configuration file and either
-  set <code>collect_statistics</code> to <code>fine</code> in
-  the <code>rabbit</code> application, or
-  set <code>force_fine_statistics</code> to <code>true</code> in
-  the <code>rabbitmq_management_agent</code> application
+  To re-enable message rates, edit your configuration file and
+  set <code>rates_mode</code> to <code>basic</code>
+  or <code>detailed</code> in the <code>rabbitmq_management</code>
+  application
 </p>
 </div>
 </div>
index baaa0e279b938656851a487b01c6b0f3423dc751..bc22fe27d58415f18b872ea0b567d071da88e0a8 100644 (file)
 %>
 <p class="status-error">
   Network partition detected<br/><br/>
-  Mnesia reports that this RabbitMQ cluster has experienced a network partition. This is a dangerous situation. RabbitMQ clusters should not be installed on networks which can experience partitions.
+  Mnesia reports that this RabbitMQ cluster has experienced a
+  network partition. There is a risk of losing data. Please read
+  <a href="http://www.rabbitmq.com/partitions.html">RabbitMQ
+  documentation about network partitions and the possible solutions</a>.
 </p>
 <p>
   The nature of the partition is as follows:
   network partitions.</a>
 </p>
 <% } %>
+<%
+   var ticktime = null;
+   var ticktimes_unequal = false;
+   for (var i = 0; i < nodes.length; i++) {
+     var node_ticktime = nodes[i].net_ticktime;
+     if (node_ticktime != undefined) {
+
+       if (ticktime != null && node_ticktime != ticktime) {
+         ticktimes_unequal = true;
+       }
+       ticktime = nodes[i].net_ticktime;
+     }
+   }
+   if (ticktimes_unequal) {
+%>
+<p class="status-error">
+  The <code>kernel</code> <code>net_ticktime</code> values are set
+  differently for different nodes in this cluster.
+</p>
+<p>
+  The values are:
+</p>
+  <table class="list">
+    <tr><th>Node</th><th>net_ticktime</th></tr>
+<%
+   for (var i = 0; i < nodes.length; i++) {
+%>
+      <tr<%= alt_rows(i)%>>
+        <td><%= nodes[i].name %></td>
+        <td><%= nodes[i].net_ticktime %></td>
+      </tr>
+<%
+   }
+%>
+  </table>
+<p>
+  This is a dangerous configuration; use of substantially
+  unequal <code>net_ticktime</code> values can lead to partitions
+  being falsely detected.
+</p>
+<p>
+  <a target="_blank"
+  href="http://www.rabbitmq.com/nettick.html">More information on
+  <code>net_ticktime</code>.</a>
+</p>
+<%
+   }
+%>
 </div>
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/paths.ejs b/rabbitmq-server/plugins-src/rabbitmq-management/priv/www/js/tmpl/paths.ejs
new file mode 100644 (file)
index 0000000..c854657
--- /dev/null
@@ -0,0 +1,38 @@
+<table class="facts facts-l">
+  <tr>
+    <th>
+<% if (node.config_files.length == 1) { %>
+      Config file
+<% } else { %>
+      Config files
+<% } %>
+
+    </th>
+    <td>
+<%
+   for (var i = 0; i < node.config_files.length; i++) {
+     var config = node.config_files[i];
+%>
+      <code><%= config %></code>
+<% } %>
+    </td>
+  </tr>
+  <tr>
+    <th>Database directory</th>
+    <td>
+      <code><%= node.db_dir %></code>
+    </td>
+  </tr>
+  <tr>
+    <th>Log file</th>
+    <td>
+      <code><%= node.log_file %></code>
+    </td>
+  </tr>
+  <tr>
+    <th>SASL log file</th>
+    <td>
+      <code><%= node.sasl_log_file %></code>
+    </td>
+  </tr>
+</table>
index 37fbc3e14e88f42f485c1e983b16302bd82661b1..9e4e3c2c3f263cd3a16aa2b3c20721e1f34faff4 100644 (file)
             </select>
           </td>
         </tr>
-        <tr>
-          <th><label>Definition: <span class="help" id="policy-definitions"></span></label></th>
-          <td><div class="multifield" id="definition"></div></td>
-          <td class="t"><span class="mand">*</span></td>
-        </tr>
         <tr>
           <th><label>Priority:</label></th>
           <td><input type="text" name="priority"/></td>
         </tr>
+        <tr>
+          <th><label>Definition:</label></th>
+          <td>
+            <div class="multifield" id="definition"></div>
+            <table class="argument-links">
+              <tr>
+                <td>HA</td>
+                <td>
+                  <span class="argument-link" field="definition" key="ha-mode" type="string">HA mode</span> <span class="help" id="policy-ha-mode"></span> |
+                  <span class="argument-link" field="definition" key="ha-params" type="number">HA params</span> <span class="help" id="policy-ha-params"></span> |
+                  <span class="argument-link" field="definition" key="ha-sync-mode" type="string">HA sync mode</span> <span class="help" id="policy-ha-sync-mode"></span>
+                </td>
+              </tr>
+              <tr>
+                <td>Federation</td>
+                <td>
+                  <span class="argument-link" field="definition" key="federation-upstream-set" type="string">Federation upstream set</span> <span class="help" id="policy-federation-upstream-set"></span> |
+                  <span class="argument-link" field="definition" key="federation-upstream" type="string">Federation upstream</span> <span class="help" id="policy-federation-upstream"></span>
+                </td>
+              </tr>
+              <tr>
+                <td>Queues</td>
+                <td>
+                  <span class="argument-link" field="definition" key="message-ttl" type="number">Message TTL</span> |
+                  <span class="argument-link" field="definition" key="expires" type="number">Auto expire</span> |
+                  <span class="argument-link" field="definition" key="max-length" type="number">Max length</span> |
+                  <span class="argument-link" field="definition" key="max-length-bytes" type="number">Max length bytes</span><br/>
+                  <span class="argument-link" field="definition" key="dead-letter-exchange" type="string">Dead letter exchange</span> |
+                  <span class="argument-link" field="definition" key="dead-letter-routing-key" type="string">Dead letter routing key</span>
+                </td>
+              </tr>
+              <tr>
+                <td>Exchanges</td>
+                <td>
+                  <span class="argument-link" field="definition" key="alternate-exchange" type="string">Alternate exchange</span>
+                </td>
+              </tr>
+            </table>
+          </td>
+          <td class="t"><span class="mand">*</span></td>
+        </tr>
       </table>
       <input type="submit" value="Add policy"/>
     </form>
index 5fc6720fe3a82b48fc4f6a20d95ee09044cc9f2c..c4a58c44b6c64ae0d7467142674bf404c3366ee8 100644 (file)
@@ -1,13 +1,9 @@
-<h1>Policy: <b><%= fmt_string(policy.name) %></b></h1>
+<h1>Policy: <b><%= fmt_string(policy.name) %></b><%= fmt_maybe_vhost(policy.vhost) %></h1>
 
 <div class="section">
   <h2>Overview</h2>
   <div class="hider">
     <table class="facts">
-      <tr>
-        <th>Virtual Host</th>
-        <td><%= fmt_string(policy.vhost) %></td>
-      </tr>
       <tr>
         <th>Pattern</th>
         <td><%= fmt_string(policy.pattern) %></td>
index 6ac46e98cdc4bea42a67cc4dffa5a2a29880fdc0..46fcd4426c81ca59c371f5945524086c71032fa3 100644 (file)
@@ -1,92 +1,27 @@
-<h1>Queue <b><%= fmt_string(queue.name) %></b></h1>
+<h1>Queue <b><%= fmt_string(queue.name) %></b><%= fmt_maybe_vhost(queue.vhost) %></h1>
 
 <div class="section">
   <h2>Overview</h2>
-  <div class="hider">
+  <div class="hider updatable">
     <%= queue_lengths('lengths-q', queue) %>
-<% if (statistics_level == 'fine') { %>
+<% if (rates_mode != 'none') { %>
     <%= message_rates('msg-rates-q', queue.message_stats) %>
 <% } %>
 
-    <div class="updatable">
     <h3>Details</h3>
-    <table class="facts">
+    <table class="facts facts-l">
       <tr>
-        <th>Parameters</th>
-        <td><%= fmt_parameters(queue) %></td>
+        <th>Features</th>
+        <td><%= fmt_features(queue) %></td>
       </tr>
       <tr>
         <th>Policy</th>
         <td><%= fmt_string(queue.policy, '') %></td>
       </tr>
+      <% if (queue.owner_pid_details != undefined) { %>
       <tr>
         <th>Exclusive owner</th>
-        <td>
-          <% if (queue.owner_pid_details == undefined) { %>
-            None
-          <% } else { %>
-            <%= link_conn(queue.owner_pid_details.name) %>
-          <% } %>
-        </td>
-      </tr>
-    </table>
-
-    <table class="facts">
-      <tr>
-        <th>State</th>
-        <td><%= fmt_object_state(queue) %></td>
-      </tr>
-      <tr>
-        <th>Consumers</th>
-        <td><%= fmt_string(queue.consumers) %></td>
-      </tr>
-      <tr>
-        <th>Consumer utilisation <span class="help" id="queue-consumer-utilisation"></th>
-        <td><%= fmt_percent(queue.consumer_utilisation) %></td>
-      </tr>
-      <tr>
-        <th>Memory</th>
-        <td><%= fmt_bytes(queue.memory) %></td>
-      </tr>
-    </table>
-
-    <table class="facts">
-      <tr>
-        <th>
-          Paging <span class="help" id="queue-memory-resident"></span>
-        </th>
-        <td>
-          <% var messages_ram = queue.backing_queue_status.ram_msg_count + queue.backing_queue_status.ram_ack_count; %>
-          <% if (messages_ram == queue.messages) { %>
-            No paging
-          <% } else { %>
-            <%= fmt_num_thousands(messages_ram) %> /
-            <%= fmt_num_thousands(queue.messages) %> msg (in RAM / total)
-          <% } %>
-          <sub>
-            <% if (queue.backing_queue_status.target_ram_count == 'infinity') { %>
-              No limit
-            <% } else { %>
-              RAM target: <%= fmt_num_thousands(queue.backing_queue_status.target_ram_count) %> msg
-            <% } %>
-          </sub>
-        </td>
-      </tr>
-      <tr>
-        <th>
-          Persistent <span class="help" id="queue-persistent"></span>
-        </th>
-        <td>
-          <%= fmt_num_thousands(queue.backing_queue_status.persistent_count) %> msg
-        </td>
-      </tr>
-    </table>
-
-    <table class="facts">
-<% if (vhosts_interesting) { %>
-      <tr>
-        <th>Virtual host</th>
-        <td><%= fmt_string(queue.vhost) %></td>
+        <td><%= link_conn(queue.owner_pid_details.name) %></td>
       </tr>
 <% } %>
 <% if (nodes_interesting) { %>
@@ -94,6 +29,7 @@
         <th>Node</th>
         <td><%= fmt_node(queue.node) %></td>
       </tr>
+  <% if (queue.owner_pid_details == undefined) { %>
       <tr>
         <th>Slaves</th>
         <td>
           <% } %>
         </td>
       </tr>
+  <% } %>
 <% } %>
     </table>
-    </div>
+
+    <table class="facts facts-l">
+      <tr>
+        <th>State</th>
+        <td><%= fmt_object_state(queue) %></td>
+      </tr>
+      <tr>
+        <th>Consumers</th>
+        <td><%= fmt_string(queue.consumers) %></td>
+      </tr>
+      <tr>
+        <th>Consumer utilisation <span class="help" id="queue-consumer-utilisation"></span></th>
+        <td><%= fmt_percent(queue.consumer_utilisation) %></td>
+      </tr>
+    </table>
+
+    <table class="facts">
+      <tr>
+        <td></td>
+        <th class="horizontal">Total</th>
+        <th class="horizontal">Ready</th>
+        <th class="horizontal">Unacked</th>
+        <th class="horizontal">In memory</th>
+        <th class="horizontal">Persistent</th>
+      </tr>
+      <tr>
+        <th>
+          Messages
+          <span class="help" id="queue-messages"></span>
+        </th>
+        <td class="r">
+          <%= fmt_num_thousands(queue.messages) %>
+        </td>
+        <td class="r">
+          <%= fmt_num_thousands(queue.messages_ready) %>
+        </td>
+        <td class="r">
+          <%= fmt_num_thousands(queue.messages_unacknowledged) %>
+        </td>
+        <td class="r">
+          <%= fmt_num_thousands(queue.messages_ram) %>
+        </td>
+        <td class="r">
+          <%= fmt_num_thousands(queue.messages_persistent) %>
+        </td>
+      </tr>
+      <tr>
+        <th>
+          Message body bytes
+          <span class="help" id="queue-message-body-bytes"></span>
+        </th>
+        <td class="r">
+          <%= fmt_bytes(queue.message_bytes) %>
+        </td>
+        <td class="r">
+          <%= fmt_bytes(queue.message_bytes_ready) %>
+        </td>
+        <td class="r">
+          <%= fmt_bytes(queue.message_bytes_unacknowledged) %>
+        </td>
+        <td class="r">
+          <%= fmt_bytes(queue.message_bytes_ram) %>
+        </td>
+        <td class="r">
+          <%= fmt_bytes(queue.message_bytes_persistent) %>
+        </td>
+      </tr>
+      <tr>
+        <th>
+          Process memory
+          <span class="help" id="queue-process-memory"></span>
+        </th>
+        <td class="r"><%= fmt_bytes(queue.memory) %></td>
+      </tr>
+    </table>
   </div>
 </div>
 
-<% if (statistics_level == 'fine') { %>
+<% if (rates_mode == 'detailed') { %>
 <div class="section-hidden">
 <h2>Message rates breakdown</h2>
 <div class="hider updatable">
   </div>
 </div>
 
+<% if (user_policymaker) { %>
+<div class="section-hidden">
+  <h2>Move messages</h2>
+  <div class="hider">
+  <% if (NAVIGATION['Admin'][0]['Shovel Management'] == undefined) { %>
+    <p>To move messages, the shovel plugin must be enabled; try:</p>
+    <pre>$ rabbitmq-plugins enable rabbitmq_shovel rabbitmq_shovel_management</pre>
+  <% } else { %>
+    <p>
+      The shovel plugin can be used to move messages from this queue
+      to another one. The form below will create a temporary shovel to
+      move messages to another queue on the same virtual host, with
+      default settings.
+    </p>
+    <p>
+      For more options <a href="#/dynamic-shovels">see the shovel
+      interface</a>.
+    </p>
+    <form action="#/shovel-parameters" method="put">
+      <input type="hidden" name="component" value="shovel"/>
+      <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+      <input type="hidden" name="name" value="Move from <%= fmt_string(queue.name) %>"/>
+      <input type="hidden" name="src-uri" value="amqp:///<%= esc(queue.vhost) %>"/>
+      <input type="hidden" name="src-queue" value="<%= fmt_string(queue.name) %>"/>
+
+      <input type="hidden" name="dest-uri" value="amqp:///<%= esc(queue.vhost) %>"/>
+      <input type="hidden" name="prefetch-count" value="1000"/>
+      <input type="hidden" name="add-forward-headers" value="false"/>
+      <input type="hidden" name="ack-mode" value="on-confirm"/>
+      <input type="hidden" name="delete-after" value="queue-length"/>
+      <input type="hidden" name="redirect" value="#/queues"/>
+
+      <table class="form">
+        <tr>
+          <th>Destination queue:</th>
+          <td><input type="text" name="dest-queue"/></td>
+        </tr>
+      </table>
+      <input type="submit" value="Move messages"/>
+    </form>
+  <% } %>
+  </div>
+</div>
+<% } %>
+
 <div class="section-hidden">
   <h2>Delete / purge</h2>
   <div class="hider">
index a579ee9d42d88dfe1c1cf607ad780700434ff3c9..0c3fed87aa7ce47d433c8a3f7bc554a38be58ca3 100644 (file)
@@ -5,17 +5,16 @@
 <%= filter_ui(queues) %>
   <div class="updatable">
 <% if (queues.length > 0) { %>
-<%
-   var col_redeliver = !is_col_empty(queues, 'redeliver');
-%>
 <table class="list">
  <thead>
   <tr>
-    <th colspan="<% if (nodes_interesting && vhosts_interesting) { %>7<% } else if (nodes_interesting || vhosts_interesting) { %>6<% } else { %>5<% } %>">Overview</th>
-    <th colspan="3">Messages</th>
-<% if (statistics_level == 'fine') { %>
-    <th colspan="3">Message rates</th>
+    <%= group_heading('queues', 'Overview', [vhosts_interesting, nodes_interesting, true]) %>
+    <%= group_heading('queues', 'Messages', []) %>
+    <%= group_heading('queues', 'Message bytes', []) %>
+<% if (rates_mode != 'none') { %>
+    <%= group_heading('queues', 'Message rates', []) %>
 <% } %>
+    <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="queues">+/-</span></th>
   </tr>
   <tr>
 <% if (vhosts_interesting) { %>
 <% if (nodes_interesting) { %>
     <th><%= fmt_sort('Node',         'node') %></th>
 <% } %>
-    <th><%= fmt_sort('Exclusive',    'owner_pid_details.name') %></th>
-    <th>Parameters</th>
-    <th><%= fmt_sort('Policy',       'policy') %></th>
+<% if (show_column('queues', 'features')) { %>
+    <th>Features</th>
+<% } %>
+<% if (show_column('queues', 'features_no_policy')) { %>
+    <th>Features</th>
+<% } %>
+<% if (show_column('queues', 'policy')) { %>
+    <th><%= fmt_sort('Policy','policy') %></th>
+<% } %>
+<% if (show_column('queues', 'consumers')) { %>
+    <th><%= fmt_sort('Consumers',    'consumers') %></th>
+<% } %>
+<% if (show_column('queues', 'consumer_utilisation')) { %>
+    <th><%= fmt_sort('Consumer utilisation', 'consumer_utilisation') %></th>
+<% } %>
+<% if (show_column('queues', 'state')) { %>
     <th><%= fmt_sort('State',        'state') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-ready')) { %>
     <th><%= fmt_sort('Ready',        'messages_ready') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-unacked')) { %>
     <th><%= fmt_sort('Unacked',      'messages_unacknowledged') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-ram')) { %>
+    <th><%= fmt_sort('In Memory',    'messages_ram') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-persistent')) { %>
+    <th><%= fmt_sort('Persistent',   'messages_persistent') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-total')) { %>
     <th><%= fmt_sort('Total',        'messages') %></th>
-<% if (statistics_level == 'fine') { %>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-ready')) { %>
+    <th><%= fmt_sort('Ready',        'message_bytes_ready') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-unacked')) { %>
+    <th><%= fmt_sort('Unacked',      'message_bytes_unacknowledged') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-ram')) { %>
+    <th><%= fmt_sort('In Memory',    'message_bytes_ram') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-persistent')) { %>
+    <th><%= fmt_sort('Persistent',   'message_bytes_persistent') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-total')) { %>
+    <th><%= fmt_sort('Total',        'message_bytes') %></th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+  <% if (show_column('queues', 'rate-incoming')) { %>
     <th><%= fmt_sort('incoming', 'message_stats.publish_details.rate') %></th>
-    <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %>
-  <% if (col_redeliver) { %>
-    <sub><%= fmt_sort('of which redelivered', 'message_stats.redeliver_details.rate') %></sub>
   <% } %>
-</th>
+  <% if (show_column('queues', 'rate-deliver')) { %>
+    <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %></th>
+  <% } %>
+  <% if (show_column('queues', 'rate-redeliver')) { %>
+    <th><%= fmt_sort('redelivered', 'message_stats.redeliver_details.rate') %></th>
+  <% } %>
+  <% if (show_column('queues', 'rate-ack')) { %>
     <th><%= fmt_sort('ack', 'message_stats.ack_details.rate') %></th>
+  <% } %>
 <% } %>
   </tr>
  </thead>
      <% } %>
    </td>
 <% } %>
+<% if (show_column('queues', 'features')) { %>
    <td class="c">
-     <% if (queue.owner_pid_details != undefined) { %>
-     <%= link_conn(queue.owner_pid_details.name, "Owner") %>
-     <% } %>
+     <%= fmt_features_short(queue) %>
+     <%= fmt_policy_short(queue) %>
    </td>
-   <td class="c"><%= fmt_parameters_short(queue) %></td>
-   <td class="c"><%= fmt_string(queue.policy, '') %></td>
+<% } %>
+<% if (show_column('queues', 'features_no_policy')) { %>
+   <td class="c"><%= fmt_features_short(queue) %></td>
+<% } %>
+<% if (show_column('queues', 'policy')) { %>
+   <td class="c"><%= fmt_string(queue.policy) %></td>
+<% } %>
+<% if (show_column('queues', 'consumers')) { %>
+   <td class="c"><%= fmt_string(queue.consumers) %></td>
+<% } %>
+<% if (show_column('queues', 'consumer_utilisation')) { %>
+   <td class="c"><%= fmt_percent(queue.consumer_utilisation) %></td>
+<% } %>
+<% if (show_column('queues', 'state')) { %>
    <td class="c"><%= fmt_object_state(queue) %></td>
+<% } %>
+<% if (show_column('queues', 'msgs-ready')) { %>
    <td class="r"><%= fmt_num_thousands(queue.messages_ready) %></td>
+<% } %>
+<% if (show_column('queues', 'msgs-unacked')) { %>
    <td class="r"><%= fmt_num_thousands(queue.messages_unacknowledged) %></td>
+<% } %>
+<% if (show_column('queues', 'msgs-ram')) { %>
+   <td class="r"><%= fmt_num_thousands(queue.messages_ram) %></td>
+<% } %>
+<% if (show_column('queues', 'msgs-persistent')) { %>
+   <td class="r"><%= fmt_num_thousands(queue.messages_persistent) %></td>
+<% } %>
+<% if (show_column('queues', 'msgs-total')) { %>
    <td class="r"><%= fmt_num_thousands(queue.messages) %></td>
-<% if (statistics_level == 'fine') { %>
-    <td class="r"><%= fmt_rate(queue.message_stats, 'publish') %></td>
-    <td class="r"><%= fmt_deliver_rate(queue.message_stats, col_redeliver) %></td>
-    <td class="r"><%= fmt_rate(queue.message_stats, 'ack') %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-ready')) { %>
+   <td class="r"><%= fmt_bytes(queue.message_bytes_ready) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-unacked')) { %>
+   <td class="r"><%= fmt_bytes(queue.message_bytes_unacknowledged) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-ram')) { %>
+   <td class="r"><%= fmt_bytes(queue.message_bytes_ram) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-persistent')) { %>
+   <td class="r"><%= fmt_bytes(queue.message_bytes_persistent) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-total')) { %>
+   <td class="r"><%= fmt_bytes(queue.message_bytes) %></td>
+<% } %>
+<% if (rates_mode != 'none') { %>
+  <% if (show_column('queues', 'rate-incoming')) { %>
+    <td class="r"><%= fmt_detail_rate(queue.message_stats, 'publish') %></td>
+  <% } %>
+  <% if (show_column('queues', 'rate-deliver')) { %>
+    <td class="r"><%= fmt_detail_rate(queue.message_stats, 'deliver_get') %></td>
+  <% } %>
+  <% if (show_column('queues', 'rate-redeliver')) { %>
+    <td class="r"><%= fmt_detail_rate(queue.message_stats, 'redeliver') %></td>
+  <% } %>
+  <% if (show_column('queues', 'rate-ack')) { %>
+    <td class="r"><%= fmt_detail_rate(queue.message_stats, 'ack') %></td>
+  <% } %>
 <% } %>
   </tr>
   <% } %>
             </select>
           </td>
         </tr>
-        <tr>
-          <th><label>Message TTL: <span class="help" id="queue-message-ttl"></span></label></th>
-          <td><input type="text" name="x-message-ttl"/> ms</td>
-        </tr>
-        <tr>
-          <th><label>Auto expire: <span class="help" id="queue-expires"></span></label></th>
-          <td><input type="text" name="x-expires"/> ms</td>
-        </tr>
-        <tr>
-          <th><label>Max length: <span class="help" id="queue-max-length"></span></label></th>
-          <td><input type="text" name="x-max-length"/></td>
-        </tr>
-        <tr>
-          <th><label>Dead letter exchange: <span class="help" id="queue-dead-letter-exchange"></span></label></th>
-          <td><input type="text" name="x-dead-letter-exchange"/></td>
-        </tr>
-        <tr>
-          <th><label>Dead letter routing key: <span class="help" id="queue-dead-letter-routing-key"></span></label></th>
-          <td><input type="text" name="x-dead-letter-routing-key"/></td>
-        </tr>
         <tr>
           <th><label>Arguments:</label></th>
-          <td><div class="multifield" id="arguments"></div></td>
+          <td>
+            <div class="multifield" id="arguments"></div>
+            <table class="argument-links">
+              <tr>
+                <td>Add</td>
+                <td>
+                  <span class="argument-link" field="arguments" key="x-message-ttl" type="number">Message TTL</span> <span class="help" id="queue-message-ttl"></span> |
+                  <span class="argument-link" field="arguments" key="x-expires" type="number">Auto expire</span> <span class="help" id="queue-expires"></span> |
+                  <span class="argument-link" field="arguments" key="x-max-length" type="number">Max length</span> <span class="help" id="queue-max-length"></span> |
+                  <span class="argument-link" field="arguments" key="x-max-length-bytes" type="number">Max length bytes</span> <span class="help" id="queue-max-length-bytes"></span><br/>
+                  <span class="argument-link" field="arguments" key="x-dead-letter-exchange" type="string">Dead letter exchange</span> <span class="help" id="queue-dead-letter-exchange"></span> |
+                  <span class="argument-link" field="arguments" key="x-dead-letter-routing-key" type="string">Dead letter routing key</span> <span class="help" id="queue-dead-letter-routing-key"></span> |
+                  <span class="argument-link" field="arguments" key="x-max-priority" type="number">Maximum priority</span> <span class="help" id="queue-max-priority"></span>
+                </td>
+              </tr>
+            </table>
+          </td>
         </tr>
       </table>
       <input type="submit" value="Add queue"/>
index 904eb8e6455ccdac9798b6443f37a9d5f28906d0..da15b0ed5b8905aee56723bfb85ffe679854e89d 100644 (file)
@@ -2,7 +2,7 @@
    var id = span.attr('for');
    var mode = get_pref('rate-mode-' + id);
    var size = get_pref('chart-size-' + id);
-   var range = get_pref('chart-range-' + id);
+   var range = get_pref('chart-range');
 %>
 
 <form action="#/rate-options" method="put" class="auto-submit">
@@ -18,7 +18,9 @@
       <td>
         <%= fmt_radio('mode', 'Chart',         'chart', mode) %>
         <%= fmt_radio('mode', 'Current value',  'curr', mode) %>
-        <%= fmt_radio('mode', 'Moving average',  'avg', mode) %>
+        <% if (id != 'node-stats') { %>
+             <%= fmt_radio('mode', 'Moving average',  'avg', mode) %>
+        <% } %>
       </td>
     </tr>
     <tr>
         <%= fmt_radio('size', 'Large',   'large', size) %>
       </td>
     </tr>
+    <tr>
+      <td colspan="2">
+        <h3>All time series</h3>
+      </td>
+    </tr>
     <tr>
       <th><label>Chart range:</label></th>
       <td>
index 1f4ba287b5c62464cde6ff062bbfd60155a57274..f0dfc4dfca7edb560a69273a30203a7e46c768e6 100644 (file)
           <td>
             <input type="text" name="tags" id="tags" />
             <span class="help" id="user-tags"/>
-            <sub>
-              [<span class="tag-link" tag="administrator">Admin</span>]
-              [<span class="tag-link" tag="monitoring">Monitoring</span>]
-              [<span class="tag-link" tag="policymaker">Policymaker</span>]
-              [<span class="tag-link" tag="management">Management</span>]
-              [<span class="tag-link" tag="">None</span>]
-            </sub>
+            <table class="argument-links">
+              <tr>
+                <td>Set</td>
+                <td>
+                  <span class="tag-link" tag="administrator">Admin</span> |
+                  <span class="tag-link" tag="monitoring">Monitoring</span> |
+                  <span class="tag-link" tag="policymaker">Policymaker</span> |
+                  <span class="tag-link" tag="management">Management</span> |
+                  <span class="tag-link" tag="">None</span>
+                </td>
+              </tr>
+            </table>
           </td>
         </tr>
       </table>
index 3a92f4ae270352ff5f9535864da07314568269a1..fe45177af105c2398a2ab810e01a53cf50a7f620 100644 (file)
@@ -9,13 +9,12 @@
 
 <div class="section">
   <h2>Overview</h2>
-  <div class="hider">
+  <div class="hider updatable">
     <%= queue_lengths('lengths-vhost', vhost) %>
-<% if (statistics_level == 'fine') { %>
+<% if (rates_mode != 'none') { %>
     <%= message_rates('msg-rates-vhost', vhost.message_stats) %>
 <% } %>
     <%= data_rates('data-rates-vhost', vhost, 'Data rates') %>
-  <div class="updatable">
     <h3>Details</h3>
     <table class="facts">
       <tr>
@@ -23,7 +22,6 @@
         <td><%= fmt_boolean(vhost.tracing) %></td>
       </tr>
     </table>
-  </div>
 </div>
 </div>
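Note that this template (and the others in this change) now gates message-rate sections on the management plugin's rates_mode setting instead of the old broker-wide statistics_level. A hedged rabbitmq.config sketch; per the 3.5-era plugin the recognised values are basic (the default), detailed and none:

    %% Turn per-object message rates off entirely; the sections guarded by
    %% "rates_mode != 'none'" in these templates then disappear.
    [
     {rabbitmq_management, [{rates_mode, none}]}
    ].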
 
index cf9b6373c0d5b817b0149b251d6cf3f76f9894bb..a6cbe9faf5924668097afa16570a0123039e7d75 100644 (file)
   <thead>
   <tr>
     <th colspan="2">Overview</th>
-    <th colspan="3">Messages</th>
-    <th colspan="2">Data rates</th>
-<% if (statistics_level == 'fine') { %>
-    <th colspan="2">Message rates</th>
+    <%= group_heading('vhosts', 'Messages', []) %>
+    <%= group_heading('vhosts', 'Network', []) %>
+<% if (rates_mode != 'none') { %>
+    <%= group_heading('vhosts', 'Message rates', []) %>
 <% } %>
+    <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="vhosts">+/-</span></th>
   </tr>
     <tr>
       <th><%= fmt_sort('Name', 'name') %></th>
       <th>Users <span class="help" id="internal-users-only"></span></th>
+<% if (show_column('vhosts',           'msgs-ready')) { %>
       <th><%= fmt_sort('Ready',        'messages_ready') %></th>
+<% } %>
+<% if (show_column('vhosts',           'msgs-unacked')) { %>
       <th><%= fmt_sort('Unacked',      'messages_unacknowledged') %></th>
+<% } %>
+<% if (show_column('vhosts',           'msgs-total')) { %>
       <th><%= fmt_sort('Total',        'messages') %></th>
-      <th><%= fmt_sort('From clients', 'recv_oct_details.rate') %></th>
-      <th><%= fmt_sort('To clients',   'send_oct_details.rate') %></th>
-<% if (statistics_level == 'fine') { %>
-      <th><%= fmt_sort('publish',      'message_stats.publish_details.rate') %></th>
-      <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %>
+<% } %>
+<% if (show_column('vhosts',           'from_client')) { %>
+      <th><%= fmt_sort('From client',  'recv_oct_details.rate') %></th>
+<% } %>
+<% if (show_column('vhosts',           'to_client')) { %>
+      <th><%= fmt_sort('To client',    'send_oct_details.rate') %></th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+  <% if (show_column('vhosts',         'rate-publish')) { %>
+      <th><%= fmt_sort('publish',     'message_stats.publish_details.rate') %></th>
+  <% } %>
+  <% if (show_column('vhosts',         'rate-deliver')) { %>
+      <th><%= fmt_sort('deliver / get','message_stats.deliver_get_details.rate') %></th>
+  <% } %>
 <% } %>
     </tr>
   </thead>
          <td><%= link_vhost(vhost.name) %></td>
          <td class="c"><%= fmt_permissions(vhost, permissions, 'vhost', 'user',
                            '<p class="warning">No users</p>') %></td>
-         <td class="r"><%= fmt_string(vhost.messages_ready) %></td>
-         <td class="r"><%= fmt_string(vhost.messages_unacknowledged) %></td>
-         <td class="r"><%= fmt_string(vhost.messages) %></td>
-         <td class="r"><%= fmt_rate_bytes(vhost, 'recv_oct') %></td>
-         <td class="r"><%= fmt_rate_bytes(vhost, 'send_oct') %></td>
-<% if (statistics_level == 'fine') { %>
-         <td class="r"><%= fmt_rate(vhost.message_stats, 'publish') %></td>
-         <td class="r"><%= fmt_deliver_rate(vhost.message_stats, false) %></td>
+<% if (show_column('vhosts', 'msgs-ready')) { %>
+   <td class="r"><%= fmt_num_thousands(vhost.messages_ready) %></td>
+<% } %>
+<% if (show_column('vhosts', 'msgs-unacked')) { %>
+   <td class="r"><%= fmt_num_thousands(vhost.messages_unacknowledged) %></td>
+<% } %>
+<% if (show_column('vhosts', 'msgs-total')) { %>
+   <td class="r"><%= fmt_num_thousands(vhost.messages) %></td>
+<% } %>
+<% if (show_column('vhosts', 'from_client')) { %>
+    <td><%= fmt_detail_rate_bytes(vhost, 'recv_oct') %></td>
+<% } %>
+<% if (show_column('vhosts', 'to_client')) { %>
+    <td><%= fmt_detail_rate_bytes(vhost, 'send_oct') %></td>
+<% } %>
+<% if (rates_mode != 'none') { %>
+  <% if (show_column('vhosts', 'rate-publish')) { %>
+    <td class="r"><%= fmt_detail_rate(vhost.message_stats, 'publish') %></td>
+  <% } %>
+  <% if (show_column('vhosts', 'rate-deliver')) { %>
+    <td class="r"><%= fmt_detail_rate(vhost.message_stats, 'deliver_get') %></td>
+  <% } %>
 <% } %>
        </tr>
     <% } %>
index a8b08948982f04d7e8912c2d24adf9a67e96f578..b5f4b6b9504f19c6a4c06503c6186e8290203dd2 100644 (file)
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_app).
 
 -behaviour(application).
--export([start/2, stop/1]).
+-export([start/2, stop/1, reset_dispatcher/1]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
@@ -28,7 +28,7 @@
 start(_Type, _StartArgs) ->
     {ok, Listener} = application:get_env(rabbitmq_management, listener),
     setup_wm_logging(),
-    register_context(Listener),
+    register_context(Listener, []),
     log_startup(Listener),
     rabbit_mgmt_sup_sup:start_link().
 
@@ -36,18 +36,28 @@ stop(_State) ->
     unregister_context(),
     ok.
 
-register_context(Listener) ->
+%% At the point at which this is invoked we have both newly enabled
+%% apps and about-to-disable apps running (so that
+%% rabbit_mgmt_reset_handler can look at all of them to find
+%% extensions). Therefore we have to explicitly exclude
+%% about-to-disable apps from our new dispatcher.
+reset_dispatcher(IgnoreApps) ->
+    unregister_context(),
+    {ok, Listener} = application:get_env(rabbitmq_management, listener),
+    register_context(Listener, IgnoreApps).
+
+register_context(Listener, IgnoreApps) ->
     rabbit_web_dispatch:register_context_handler(
-      ?CONTEXT, Listener, "", make_loop(), "RabbitMQ Management").
+      ?CONTEXT, Listener, "", make_loop(IgnoreApps), "RabbitMQ Management").
 
 unregister_context() ->
     rabbit_web_dispatch:unregister_context(?CONTEXT).
 
-make_loop() ->
-    Dispatch = rabbit_mgmt_dispatcher:build_dispatcher(),
+make_loop(IgnoreApps) ->
+    Dispatch = rabbit_mgmt_dispatcher:build_dispatcher(IgnoreApps),
     WMLoop = rabbit_webmachine:makeloop(Dispatch),
     LocalPaths = [filename:join(module_path(M), ?STATIC_PATH) ||
-                     M <- rabbit_mgmt_dispatcher:modules()],
+                     M <- rabbit_mgmt_dispatcher:modules(IgnoreApps)],
     fun(Req) -> respond(Req, LocalPaths, WMLoop) end.
 
 module_path(Module) ->
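reset_dispatcher/1 rebuilds the web dispatch table while leaving out applications that are about to stop. A hedged usage sketch from an attached Erlang shell (the plugin name is illustrative); in practice it is invoked by the new rabbit_mgmt_reset_handler further down:

    %% Re-register the management context without the extension being disabled,
    %% so its HTTP handlers do not linger in the dispatch table.
    rabbit_mgmt_app:reset_dispatcher([rabbitmq_shovel_management]).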
index eda933f78358ced859d99bd31cc21865779da912..e7cb753160ce5ddc25b4c58c62fe9b59816e80c7 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_db).
 -export([start_link/0]).
 
 -export([augment_exchanges/3, augment_queues/3,
-         augment_nodes/1, augment_vhosts/2,
+         augment_nodes/2, augment_vhosts/2,
          get_channel/2, get_connection/2,
          get_all_channels/1, get_all_connections/1,
+         get_all_consumers/0, get_all_consumers/1,
          get_overview/2, get_overview/1]).
 
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-         code_change/3, handle_pre_hibernate/1, prioritise_cast/3,
-         format_message_queue/2]).
+         code_change/3, handle_pre_hibernate/1,
+         prioritise_cast/3, prioritise_call/4, format_message_queue/2]).
 
 %% For testing
 -export([override_lookups/1, reset_lookups/0]).
           gc_next_key,
           lookups,
           interval,
-          event_refresh_ref}).
+          event_refresh_ref,
+          rates_mode}).
 
 -define(FINE_STATS_TYPES, [channel_queue_stats, channel_exchange_stats,
                            channel_queue_exchange_stats]).
 -define(TABLES, [queue_stats, connection_stats, channel_stats,
                  consumers_by_queue, consumers_by_channel,
-                 node_stats]).
+                 node_stats, node_node_stats]).
 
 -define(DELIVER_GET, [deliver, deliver_no_ack, get, get_no_ack]).
 -define(FINE_STATS, [publish, publish_in, publish_out,
                      ack, deliver_get, confirm, return_unroutable, redeliver] ++
             ?DELIVER_GET).
 
--define(COARSE_QUEUE_STATS,
-        [messages, messages_ready, messages_unacknowledged]).
+%% Most come from channels as fine stats, but queues emit these directly.
+-define(QUEUE_MSG_RATES, [disk_reads, disk_writes]).
+
+-define(MSG_RATES, ?FINE_STATS ++ ?QUEUE_MSG_RATES).
+
+-define(QUEUE_MSG_COUNTS, [messages, messages_ready, messages_unacknowledged]).
+
+-define(COARSE_NODE_STATS,
+        [mem_used, fd_used, sockets_used, proc_used, disk_free,
+         io_read_count,  io_read_bytes,  io_read_avg_time,
+         io_write_count, io_write_bytes, io_write_avg_time,
+         io_sync_count,  io_sync_avg_time,
+         io_seek_count,  io_seek_avg_time,
+         io_reopen_count, mnesia_ram_tx_count,  mnesia_disk_tx_count,
+         msg_store_read_count, msg_store_write_count,
+         queue_index_journal_write_count,
+         queue_index_write_count, queue_index_read_count]).
+
+-define(COARSE_NODE_NODE_STATS, [send_bytes, recv_bytes]).
+
+%% Normally 0 and no history means "has never happened, don't
+%% report". But for these things we do want to report even at 0 with
+%% no history.
+-define(ALWAYS_REPORT_STATS,
+        [io_read_avg_time, io_write_avg_time,
+         io_sync_avg_time | ?QUEUE_MSG_COUNTS]).
 
 -define(COARSE_CONN_STATS, [recv_oct, send_oct]).
 
@@ -175,6 +201,14 @@ prioritise_cast({event, #event{type  = Type,
 prioritise_cast(_Msg, _Len, _State) ->
     0.
 
+%% We want timely replies to queries even when overloaded, so return 5
+%% as priority. Also we only have access to the queue length here, not
+%% in handle_call/3, so stash it in the dictionary. This is a bit ugly
+%% but better than fiddling with gen_server2 even more.
+prioritise_call(_Msg, _From, Len, _State) ->
+    put(last_queue_length, Len),
+    5.
+
 %%----------------------------------------------------------------------------
 %% API
 %%----------------------------------------------------------------------------
@@ -194,7 +228,7 @@ start_link() ->
 augment_exchanges(Xs, R, M) -> safe_call({augment_exchanges, Xs, R, M}, Xs).
 augment_queues(Qs, R, M)    -> safe_call({augment_queues, Qs, R, M}, Qs).
 augment_vhosts(VHosts, R)   -> safe_call({augment_vhosts, VHosts, R}, VHosts).
-augment_nodes(Nodes)        -> safe_call({augment_nodes, Nodes}, Nodes).
+augment_nodes(Nodes, R)     -> safe_call({augment_nodes, Nodes, R}, Nodes).
 
 get_channel(Name, R)        -> safe_call({get_channel, Name, R}, not_found).
 get_connection(Name, R)     -> safe_call({get_connection, Name, R}, not_found).
@@ -202,6 +236,9 @@ get_connection(Name, R)     -> safe_call({get_connection, Name, R}, not_found).
 get_all_channels(R)         -> safe_call({get_all_channels, R}).
 get_all_connections(R)      -> safe_call({get_all_connections, R}).
 
+get_all_consumers()         -> safe_call({get_all_consumers, all}).
+get_all_consumers(V)        -> safe_call({get_all_consumers, V}).
+
 get_overview(User, R)       -> safe_call({get_overview, User, R}).
 get_overview(R)             -> safe_call({get_overview, all, R}).
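get_all_consumers/0,1 back the new /api/consumers endpoints registered in the dispatcher change further down. A hedged call sketch from an attached Erlang shell:

    %% Every consumer in every vhost, then only those in the default vhost;
    %% per the handler below, each element is a consumer proplist with
    %% channel and queue details merged in.
    All     = rabbit_mgmt_db:get_all_consumers(),
    Default = rabbit_mgmt_db:get_all_consumers(<<"/">>).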
 
@@ -213,15 +250,15 @@ safe_call(Term, Default) -> safe_call(Term, Default, 1).
 
 %% See rabbit_mgmt_sup_sup for a discussion of the retry logic.
 safe_call(Term, Default, Retries) ->
-    try
-        gen_server2:call({global, ?MODULE}, Term, infinity)
-    catch exit:{noproc, _} ->
-            case Retries of
-                0 -> Default;
-                _ -> rabbit_mgmt_sup_sup:start_child(),
-                     safe_call(Term, Default, Retries - 1)
-            end
-    end.
+    rabbit_misc:with_exit_handler(
+      fun () ->
+              case Retries of
+                  0 -> Default;
+                  _ -> rabbit_mgmt_sup_sup:start_child(),
+                       safe_call(Term, Default, Retries - 1)
+              end
+      end,
+      fun () -> gen_server2:call({global, ?MODULE}, Term, infinity) end).
 
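safe_call/3 now routes the fallback through rabbit_misc:with_exit_handler/2 rather than an explicit try/catch. For the caller it is roughly the removed logic, sketched below; with_exit_handler also swallows nodedown, normal and shutdown exits, so the two are not exactly equivalent (Term, Default and Retries stand for the safe_call/3 arguments):

    try
        gen_server2:call({global, rabbit_mgmt_db}, Term, infinity)
    catch
        exit:{noproc, _} when Retries =:= 0 ->
            Default;
        exit:{noproc, _} ->
            rabbit_mgmt_sup_sup:start_child(),
            safe_call(Term, Default, Retries - 1)
    end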
 %%----------------------------------------------------------------------------
 %% Internal, gen_server2 callbacks
@@ -232,6 +269,7 @@ init([Ref]) ->
     %% that the management plugin work.
     process_flag(priority, high),
     {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+    {ok, RatesMode} = application:get_env(rabbitmq_management, rates_mode),
     rabbit_node_monitor:subscribe(self()),
     rabbit_log:info("Statistics database started.~n"),
     Table = fun () -> ets:new(rabbit_mgmt_db, [ordered_set]) end,
@@ -243,7 +281,8 @@ init([Ref]) ->
                     old_stats              = Table(),
                     aggregated_stats       = Table(),
                     aggregated_stats_index = Table(),
-                    event_refresh_ref      = Ref})), hibernate,
+                    event_refresh_ref      = Ref,
+                    rates_mode             = RatesMode})), hibernate,
      {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
 
 handle_call({augment_exchanges, Xs, Ranges, basic}, _From, State) ->
@@ -261,8 +300,8 @@ handle_call({augment_queues, Qs, Ranges, full}, _From, State) ->
 handle_call({augment_vhosts, VHosts, Ranges}, _From, State) ->
     reply(vhost_stats(Ranges, VHosts, State), State);
 
-handle_call({augment_nodes, Nodes}, _From, State) ->
-    {reply, node_stats(Nodes, State), State};
+handle_call({augment_nodes, Nodes, Ranges}, _From, State) ->
+    {reply, node_stats(Ranges, Nodes, State), State};
 
 handle_call({get_channel, Name, Ranges}, _From,
             State = #state{tables = Tables}) ->
@@ -290,6 +329,14 @@ handle_call({get_all_connections, Ranges}, _From,
     Conns = created_events(connection_stats, Tables),
     reply(connection_stats(Ranges, Conns, State), State);
 
+handle_call({get_all_consumers, VHost},
+            _From, State = #state{tables = Tables}) ->
+    All = ets:tab2list(orddict:fetch(consumers_by_queue, Tables)),
+    {reply, [augment_msg_stats(
+               augment_consumer(Obj), State) ||
+                {{#resource{virtual_host = VHostC}, _Ch, _CTag}, Obj} <- All,
+                VHost =:= all orelse VHost =:= VHostC], State};
+
 handle_call({get_overview, User, Ranges}, _From,
             State = #state{tables = Tables}) ->
     VHosts = case User of
@@ -300,8 +347,8 @@ handle_call({get_overview, User, Ranges}, _From,
     %% recv_oct now!
     VStats = [read_simple_stats(vhost_stats, VHost, State) ||
                  VHost <- VHosts],
-    MessageStats = [overview_sum(Type, VStats) || Type <- ?FINE_STATS],
-    QueueStats = [overview_sum(Type, VStats) || Type <- ?COARSE_QUEUE_STATS],
+    MessageStats = [overview_sum(Type, VStats) || Type <- ?MSG_RATES],
+    QueueStats = [overview_sum(Type, VStats) || Type <- ?QUEUE_MSG_COUNTS],
     F = case User of
             all -> fun (L) -> length(L) end;
             _   -> fun (L) -> length(rabbit_mgmt_util:filter_user(L, User)) end
@@ -322,7 +369,8 @@ handle_call({get_overview, User, Ranges}, _From,
          {channels,    F(created_events(channel_stats, Tables))}],
     reply([{message_stats, format_samples(Ranges, MessageStats, State)},
            {queue_totals,  format_samples(Ranges, QueueStats, State)},
-           {object_totals, ObjectTotals}], State);
+           {object_totals, ObjectTotals},
+           {statistics_db_event_queue, get(last_queue_length)}], State);
 
 handle_call({override_lookups, Lookups}, _From, State) ->
     reply(ok, State#state{lookups = Lookups});
@@ -330,6 +378,12 @@ handle_call({override_lookups, Lookups}, _From, State) ->
 handle_call(reset_lookups, _From, State) ->
     reply(ok, reset_lookups(State));
 
+%% Used in rabbit_mgmt_test_db where we need guarantees events have
+%% been handled before querying
+handle_call({event, Event = #event{reference = none}}, _From, State) ->
+    handle_event(Event, State),
+    reply(ok, State);
+
 handle_call(_Request, _From, State) ->
     reply(not_understood, State).
 
@@ -407,6 +461,7 @@ pget(Key, List) -> pget(Key, List, unknown).
 %% passed a queue proplist that will already have been formatted -
 %% i.e. it will have name and vhost keys.
 id_name(node_stats)       -> name;
+id_name(node_node_stats)  -> route;
 id_name(vhost_stats)      -> name;
 id_name(queue_stats)      -> name;
 id_name(exchange_stats)   -> name;
@@ -433,9 +488,9 @@ fine_stats_id(ChPid, {Q, X}) -> {ChPid, Q, X};
 fine_stats_id(ChPid, QorX)   -> {ChPid, QorX}.
 
 floor(TS, #state{interval = Interval}) ->
-    rabbit_mgmt_util:floor(rabbit_mgmt_format:timestamp_ms(TS), Interval).
+    rabbit_mgmt_util:floor(rabbit_mgmt_format:now_to_ms(TS), Interval).
 ceil(TS, #state{interval = Interval}) ->
-    rabbit_mgmt_util:ceil (rabbit_mgmt_format:timestamp_ms(TS), Interval).
+    rabbit_mgmt_util:ceil (rabbit_mgmt_format:now_to_ms(TS), Interval).
 
 details_key(Key) -> list_to_atom(atom_to_list(Key) ++ "_details").
 
@@ -447,9 +502,9 @@ handle_event(#event{type = queue_stats, props = Stats, timestamp = Timestamp},
              State) ->
     handle_stats(queue_stats, Stats, Timestamp,
                  [{fun rabbit_mgmt_format:properties/1,[backing_queue_status]},
-                  {fun rabbit_mgmt_format:timestamp/1, [idle_since]},
+                  {fun rabbit_mgmt_format:now_to_str/1, [idle_since]},
                   {fun rabbit_mgmt_format:queue_state/1, [state]}],
-                 [messages, messages_ready, messages_unacknowledged], State);
+                 ?QUEUE_MSG_COUNTS, ?QUEUE_MSG_RATES, State);
 
 handle_event(Event = #event{type = queue_deleted,
                             props = [{name, Name}],
@@ -464,8 +519,8 @@ handle_event(Event = #event{type = queue_deleted,
     %% This ceil must correspond to the ceil in append_samples/5
     TS = ceil(Timestamp, State),
     OldStats = lookup_element(OldTable, Id),
-    [record_sample(Id, {Key, -pget(Key, OldStats, 0), TS, State}, State)
-     || Key <- ?COARSE_QUEUE_STATS],
+    [record_sample(Id, {Key, -pget(Key, OldStats, 0), TS, State}, true, State)
+     || Key <- ?QUEUE_MSG_COUNTS],
     delete_samples(channel_queue_stats,  {'_', Name}, State),
     delete_samples(queue_exchange_stats, {Name, '_'}, State),
     delete_samples(queue_stats,          Name,        State),
@@ -480,8 +535,7 @@ handle_event(Event = #event{type = exchange_deleted,
 
 handle_event(#event{type = vhost_deleted,
                     props = [{name, Name}]}, State) ->
-    delete_samples(vhost_stats, Name, State),
-    {ok, State};
+    delete_samples(vhost_stats, Name, State);
 
 handle_event(#event{type = connection_created, props = Stats}, State) ->
     handle_created(
@@ -508,7 +562,7 @@ handle_event(#event{type = channel_created, props = Stats}, State) ->
 handle_event(#event{type = channel_stats, props = Stats, timestamp = Timestamp},
              State = #state{old_stats = OldTable}) ->
     handle_stats(channel_stats, Stats, Timestamp,
-                 [{fun rabbit_mgmt_format:timestamp/1, [idle_since]}],
+                 [{fun rabbit_mgmt_format:now_to_str/1, [idle_since]}],
                  [], State),
     ChPid = id(channel_stats, Stats),
     AllStats = [old_fine_stats(Type, Stats, State)
@@ -516,8 +570,7 @@ handle_event(#event{type = channel_stats, props = Stats, timestamp = Timestamp},
     ets:match_delete(OldTable, {{fine, {ChPid, '_'}},      '_'}),
     ets:match_delete(OldTable, {{fine, {ChPid, '_', '_'}}, '_'}),
     [handle_fine_stats(Timestamp, AllStatsElem, State)
-     || AllStatsElem <- AllStats],
-    {ok, State};
+     || AllStatsElem <- AllStats];
 
 handle_event(Event = #event{type = channel_closed,
                             props = [{pid, Pid}]},
@@ -545,15 +598,24 @@ handle_event(#event{type = consumer_deleted, props = Props}, State) ->
 %% TODO: we don't clear up after dead nodes here - this is a very tiny
 %% leak every time a node is permanently removed from the cluster. Do
 %% we care?
-handle_event(#event{type = node_stats, props = Stats, timestamp = Timestamp},
-             State = #state{tables = Tables}) ->
-    Table = orddict:fetch(node_stats, Tables),
-    ets:insert(Table, {{pget(name, Stats), stats},
-                       proplists:delete(name, Stats), Timestamp}),
-    {ok, State};
-
-handle_event(_Event, State) ->
-    {ok, State}.
+handle_event(#event{type = node_stats, props = Stats0, timestamp = Timestamp},
+             State) ->
+    Stats = proplists:delete(persister_stats, Stats0) ++
+        pget(persister_stats, Stats0),
+    handle_stats(node_stats, Stats, Timestamp, [], ?COARSE_NODE_STATS, State);
+
+handle_event(#event{type = node_node_stats, props = Stats,
+                    timestamp = Timestamp}, State) ->
+    handle_stats(node_node_stats, Stats, Timestamp, [], ?COARSE_NODE_NODE_STATS,
+                 State);
+
+handle_event(Event = #event{type  = node_node_deleted,
+                            props = [{route, Route}]}, State) ->
+    delete_samples(node_node_stats, Route, State),
+    handle_deleted(node_node_stats, Event, State);
+
+handle_event(_Event, _State) ->
+    ok.
 
 handle_created(TName, Stats, Funs, State = #state{tables = Tables}) ->
     Formatted = rabbit_mgmt_format:format(Stats, Funs),
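The node_stats clause above flattens the nested persister_stats proplist into the top level so those counters can be sampled like any other key in ?COARSE_NODE_STATS. A quick sketch of the flattening with made-up numbers:

    Stats0 = [{name, 'rabbit@a'}, {mem_used, 1024},
              {persister_stats, [{msg_store_read_count, 7},
                                 {queue_index_write_count, 3}]}],
    proplists:delete(persister_stats, Stats0) ++
        proplists:get_value(persister_stats, Stats0).
    %% => [{name,'rabbit@a'},{mem_used,1024},
    %%     {msg_store_read_count,7},{queue_index_write_count,3}]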
@@ -562,12 +624,18 @@ handle_created(TName, Stats, Funs, State = #state{tables = Tables}) ->
                                               pget(name, Stats)}),
     {ok, State}.
 
-handle_stats(TName, Stats, Timestamp, Funs, RatesKeys,
+handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, State) ->
+    handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, [], State).
+
+handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, NoAggRatesKeys,
              State = #state{tables = Tables, old_stats = OldTable}) ->
     Id = id(TName, Stats),
     IdSamples = {coarse, {TName, Id}},
     OldStats = lookup_element(OldTable, IdSamples),
-    append_samples(Stats, Timestamp, OldStats, IdSamples, RatesKeys, State),
+    append_samples(
+      Stats, Timestamp, OldStats, IdSamples, RatesKeys, true, State),
+    append_samples(
+      Stats, Timestamp, OldStats, IdSamples, NoAggRatesKeys, false, State),
     StripKeys = [id_name(TName)] ++ RatesKeys ++ ?FINE_STATS_TYPES,
     Stats1 = [{K, V} || {K, V} <- Stats, not lists:member(K, StripKeys)],
     Stats2 = rabbit_mgmt_format:format(Stats1, Funs),
@@ -631,7 +699,7 @@ handle_fine_stat(Id, Stats, Timestamp, OldStats, State) ->
                  0 -> Stats;
                  _ -> [{deliver_get, Total}|Stats]
              end,
-    append_samples(Stats1, Timestamp, OldStats, {fine, Id}, all, State).
+    append_samples(Stats1, Timestamp, OldStats, {fine, Id}, all, true, State).
 
 delete_samples(Type, {Id, '_'}, State) ->
     delete_samples_with_index(Type, Id, fun forward/2, State);
@@ -655,7 +723,7 @@ reverse(A, B) -> {B, A}.
 
 delete_match(Type, Id) -> {{{Type, Id}, '_'}, '_'}.
 
-append_samples(Stats, TS, OldStats, Id, Keys,
+append_samples(Stats, TS, OldStats, Id, Keys, Agg,
                State = #state{old_stats = OldTable}) ->
     case ignore_coarse_sample(Id, State) of
         false ->
@@ -663,22 +731,26 @@ append_samples(Stats, TS, OldStats, Id, Keys,
             %% queue_deleted
             NewMS = ceil(TS, State),
             case Keys of
-                all -> [append_sample(Key, Value, NewMS, OldStats, Id, State)
-                        || {Key, Value} <- Stats];
-                _   -> [append_sample(
-                          Key, pget(Key, Stats), NewMS, OldStats, Id, State)
-                        || Key <- Keys]
+                all -> [append_sample(K, V, NewMS, OldStats, Id, Agg, State)
+                        || {K, V} <- Stats];
+                _   -> [append_sample(K, V, NewMS, OldStats, Id, Agg, State)
+                        || K <- Keys,
+                           V <- [pget(K, Stats)],
+                           V =/= 0 orelse lists:member(K, ?ALWAYS_REPORT_STATS)]
             end,
             ets:insert(OldTable, {Id, Stats});
         true ->
             ok
     end.
 
-append_sample(Key, Value, NewMS, OldStats, Id, State) when is_number(Value) ->
-    record_sample(
-      Id, {Key, Value - pget(Key, OldStats, 0), NewMS, State}, State);
-
-append_sample(_Key, _Value, _NewMS, _OldStats, _Id, _State) ->
+append_sample(Key, Val, NewMS, OldStats, Id, Agg, State) when is_number(Val) ->
+    OldVal = case pget(Key, OldStats, 0) of
+        N when is_number(N) -> N;
+        _                   -> 0
+    end,
+    record_sample(Id, {Key, Val - OldVal, NewMS, State}, Agg, State),
+    ok;
+append_sample(_Key, _Value, _NewMS, _OldStats, _Id, _Agg, _State) ->
     ok.
 
 ignore_coarse_sample({coarse, {queue_stats, Q}}, State) ->
@@ -686,12 +758,22 @@ ignore_coarse_sample({coarse, {queue_stats, Q}}, State) ->
 ignore_coarse_sample(_, _) ->
     false.
 
-record_sample({coarse, Id}, Args, State) ->
+%% Node stats do not have a vhost of course
+record_sample({coarse, {node_stats, _Node} = Id}, Args, true, _State) ->
+    record_sample0(Id, Args);
+
+record_sample({coarse, {node_node_stats, _Names} = Id}, Args, true, _State) ->
+    record_sample0(Id, Args);
+
+record_sample({coarse, Id}, Args, false, _State) ->
+    record_sample0(Id, Args);
+
+record_sample({coarse, Id}, Args, true, State) ->
     record_sample0(Id, Args),
     record_sample0({vhost_stats, vhost(Id, State)}, Args);
 
 %% Deliveries / acks (Q -> Ch)
-record_sample({fine, {Ch, Q = #resource{kind = queue}}}, Args, State) ->
+record_sample({fine, {Ch, Q = #resource{kind = queue}}}, Args, true, State) ->
     case object_exists(Q, State) of
         true  -> record_sample0({channel_queue_stats, {Ch, Q}}, Args),
                  record_sample0({queue_stats,         Q},       Args);
@@ -701,7 +783,7 @@ record_sample({fine, {Ch, Q = #resource{kind = queue}}}, Args, State) ->
     record_sample0({vhost_stats,   vhost(Q)}, Args);
 
 %% Publishes / confirms (Ch -> X)
-record_sample({fine, {Ch, X = #resource{kind = exchange}}}, Args, State) ->
+record_sample({fine, {Ch, X = #resource{kind = exchange}}}, Args, true,State) ->
     case object_exists(X, State) of
         true  -> record_sample0({channel_exchange_stats, {Ch, X}}, Args),
                  record_sampleX(publish_in,              X,        Args);
@@ -713,7 +795,7 @@ record_sample({fine, {Ch, X = #resource{kind = exchange}}}, Args, State) ->
 %% Publishes (but not confirms) (Ch -> X -> Q)
 record_sample({fine, {_Ch,
                       Q = #resource{kind = queue},
-                      X = #resource{kind = exchange}}}, Args, State) ->
+                      X = #resource{kind = exchange}}}, Args, true, State) ->
     %% TODO This one logically feels like it should be here. It would
     %% correspond to "publishing channel message rates to queue" -
     %% which would be nice to handle - except we don't. And just
@@ -770,6 +852,11 @@ record_sampleX(RenamePublishTo, X, {publish, Diff, TS, State}) ->
 record_sampleX(_RenamePublishTo, X, {Type, Diff, TS, State}) ->
     record_sample0({exchange_stats, X}, {Type, Diff, TS, State}).
 
+%% Ignore case where ID1 and ID2 are in a tuple, i.e. detailed stats,
+%% when in basic mode
+record_sample0({Type, {_ID1, _ID2}}, {_, _, _, #state{rates_mode = basic}})
+  when Type =/= node_node_stats ->
+    ok;
 record_sample0(Id0, {Key, Diff, TS, #state{aggregated_stats       = ETS,
                                            aggregated_stats_index = ETSi}}) ->
     Id = {Id0, Key},
@@ -802,6 +889,9 @@ record_sample0(Id0, {Key, Diff, TS, #state{aggregated_stats       = ETS,
         {channel_stats, [{publishes,  channel_exchange_stats, fun first/1},
                          {deliveries, channel_queue_stats,    fun first/1}]}).
 
+-define(NODE_DETAILS,
+        {node_stats, [{cluster_links, node_node_stats, fun first/1}]}).
+
 first(Id)  -> {Id, '$1'}.
 second(Id) -> {'$1', Id}.
 
@@ -853,13 +943,23 @@ detail_channel_stats(Ranges, Objs, State) ->
 vhost_stats(Ranges, Objs, State) ->
     merge_stats(Objs, [simple_stats_fun(Ranges, vhost_stats, State)]).
 
-node_stats(Objs, State) ->
-    merge_stats(Objs, [basic_stats_fun(node_stats, State)]).
+node_stats(Ranges, Objs, State) ->
+    merge_stats(Objs, [basic_stats_fun(node_stats, State),
+                       simple_stats_fun(Ranges, node_stats, State),
+                       detail_and_basic_stats_fun(
+                         node_node_stats, Ranges, ?NODE_DETAILS, State)]).
 
 merge_stats(Objs, Funs) ->
-    [lists:foldl(fun (Fun, Props) -> Fun(Props) ++ Props end, Obj, Funs)
+    [lists:foldl(fun (Fun, Props) -> combine(Fun(Props), Props) end, Obj, Funs)
      || Obj <- Objs].
 
+combine(New, Old) ->
+    case pget(state, Old) of
+        unknown -> New ++ Old;
+        live    -> New ++ proplists:delete(state, Old);
+        _       -> proplists:delete(state, New) ++ Old
+    end.
+
 %% i.e. the non-calculated stats
 basic_stats_fun(Type, #state{tables = Tables}) ->
     Table = orddict:fetch(Type, Tables),
@@ -884,6 +984,28 @@ detail_stats_fun(Ranges, {IdType, FineSpecs}, State) ->
              || {Name, AggregatedStatsType, IdFun} <- FineSpecs]
     end.
 
+%% This does not quite do the same as detail_stats_fun +
+%% basic_stats_fun; the basic part here assumes compound keys (like
+%% detail stats) but non-calculated (like basic stats). Currently the
+%% only user of that is node-node stats.
+%%
+%% We also assume that FineSpecs is single length here (at [1]).
+detail_and_basic_stats_fun(Type, Ranges, {IdType, FineSpecs},
+                           State = #state{tables = Tables}) ->
+    Table = orddict:fetch(Type, Tables),
+    F = detail_stats_fun(Ranges, {IdType, FineSpecs}, State),
+    fun (Props) ->
+            Id = id_lookup(IdType, Props),
+            BasicStatsRaw = ets:match(Table, {{{Id, '$1'}, stats}, '$2', '_'}),
+            BasicStatsDict = dict:from_list([{K, V} || [K,V] <- BasicStatsRaw]),
+            [{K, Items}] = F(Props), %% [1]
+            Items2 = [case dict:find(id_lookup(IdType, Item), BasicStatsDict) of
+                          {ok, BasicStats} -> BasicStats ++ Item;
+                          error            -> Item
+                      end || Item <- Items],
+            [{K, Items2}]
+    end.
+
 read_simple_stats(Type, Id, #state{aggregated_stats = ETS}) ->
     FromETS = ets:match(ETS, {{{Type, Id}, '$1'}, '$2'}),
     [{K, V} || [K, V] <- FromETS].
@@ -903,7 +1025,7 @@ read_detail_stats(Type, Id, #state{aggregated_stats = ETS}) ->
       end, [], FromETS).
 
 extract_msg_stats(Stats) ->
-    FineStats = lists:append([[K, details_key(K)] || K <- ?FINE_STATS]),
+    FineStats = lists:append([[K, details_key(K)] || K <- ?MSG_RATES]),
     {MsgStats, Other} =
         lists:partition(fun({K, _}) -> lists:member(K, FineStats) end, Stats),
     case MsgStats of
@@ -920,12 +1042,14 @@ format_detail_id(ChPid, State) when is_pid(ChPid) ->
     augment_msg_stats([{channel, ChPid}], State);
 format_detail_id(#resource{name = Name, virtual_host = Vhost, kind = Kind},
                  _State) ->
-    [{Kind, [{name, Name}, {vhost, Vhost}]}].
+    [{Kind, [{name, Name}, {vhost, Vhost}]}];
+format_detail_id(Node, _State) when is_atom(Node) ->
+    [{name, Node}].
 
 format_samples(Ranges, ManyStats, #state{interval = Interval}) ->
     lists:append(
       [case rabbit_mgmt_stats:is_blank(Stats) andalso
-           not lists:member(K, ?COARSE_QUEUE_STATS) of
+           not lists:member(K, ?ALWAYS_REPORT_STATS) of
            true  -> [];
            false -> {Details, Counter} = rabbit_mgmt_stats:format(
                                            pick_range(K, Ranges),
@@ -934,13 +1058,16 @@ format_samples(Ranges, ManyStats, #state{interval = Interval}) ->
                      {details_key(K), Details}]
        end || {K, Stats} <- ManyStats]).
 
-pick_range(K, {RangeL, RangeM, RangeD}) ->
-    case {lists:member(K, ?COARSE_QUEUE_STATS),
-          lists:member(K, ?FINE_STATS),
-          lists:member(K, ?COARSE_CONN_STATS)} of
-        {true, false, false} -> RangeL;
-        {false, true, false} -> RangeM;
-        {false, false, true} -> RangeD
+pick_range(K, {RangeL, RangeM, RangeD, RangeN}) ->
+    case {lists:member(K, ?QUEUE_MSG_COUNTS),
+          lists:member(K, ?MSG_RATES),
+          lists:member(K, ?COARSE_CONN_STATS),
+          lists:member(K, ?COARSE_NODE_STATS)
+          orelse lists:member(K, ?COARSE_NODE_NODE_STATS)} of
+        {true, false, false, false} -> RangeL;
+        {false, true, false, false} -> RangeM;
+        {false, false, true, false} -> RangeD;
+        {false, false, false, true} -> RangeN
     end.
 
 %% We do this when retrieving the queue record rather than when
@@ -1027,6 +1154,7 @@ augment_channel_pid(Pid, #state{tables = Tables}) ->
                           {pget(connection, Ch), create}),
     [{name,            pget(name,   Ch)},
      {number,          pget(number, Ch)},
+     {user,            pget(user,   Ch)},
      {connection_name, pget(name,         Conn)},
      {peer_port,       pget(peer_port,    Conn)},
      {peer_host,       pget(peer_host,    Conn)}].
@@ -1052,14 +1180,14 @@ gc_batch(State = #state{aggregated_stats = ETS}) ->
 gc_batch(0, _Policies, State) ->
     State;
 gc_batch(Rows, Policies, State = #state{aggregated_stats = ETS,
-                         gc_next_key      = Key0}) ->
+                                        gc_next_key      = Key0}) ->
     Key = case Key0 of
               undefined -> ets:first(ETS);
               _         -> ets:next(ETS, Key0)
           end,
     Key1 = case Key of
                '$end_of_table' -> undefined;
-               _               -> Now = floor(erlang:now(), State),
+               _               -> Now = floor(os:timestamp(), State),
                                   Stats = ets:lookup_element(ETS, Key, 2),
                                   gc(Key, Stats, Policies, Now, ETS),
                                   Key
@@ -1073,6 +1201,8 @@ gc({{Type, Id}, Key}, Stats, Policies, Now, ETS) ->
         Stats2 -> ets:insert(ETS, {{{Type, Id}, Key}, Stats2})
     end.
 
+retention_policy(node_stats)             -> global;
+retention_policy(node_node_stats)        -> global;
 retention_policy(vhost_stats)            -> global;
 retention_policy(queue_stats)            -> basic;
 retention_policy(exchange_stats)         -> basic;
index bcfcfc6de621c6340655a8eaa075aba50a8e41da..e42635e1dd2cf16adbaed0c562fd0d1a605b1bd8 100644 (file)
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_dispatcher).
 
--export([modules/0, build_dispatcher/0]).
+-export([modules/1, build_dispatcher/1]).
 
 -behaviour(rabbit_mgmt_extension).
 -export([dispatcher/0, web_ui/0]).
 
-build_dispatcher() ->
+build_dispatcher(Ignore) ->
     [{["api" | Path], Mod, Args} ||
         {Path, Mod, Args} <-
-            lists:append([Module:dispatcher() || Module <- modules()])].
+            lists:append([Module:dispatcher() || Module <- modules(Ignore)])].
 
-modules() ->
-    [Module || {Module, Behaviours} <-
+modules(IgnoreApps) ->
+    [Module || {App, Module, Behaviours} <-
                    rabbit_misc:all_module_attributes(behaviour),
+               not lists:member(App, IgnoreApps),
                lists:member(rabbit_mgmt_extension, Behaviours)].
 
 %%----------------------------------------------------------------------------
@@ -55,6 +56,8 @@ dispatcher() ->
      {["connections", connection, "channels"],                     rabbit_mgmt_wm_connection_channels, []},
      {["channels"],                                                rabbit_mgmt_wm_channels, []},
      {["channels", channel],                                       rabbit_mgmt_wm_channel, []},
+     {["consumers"],                                               rabbit_mgmt_wm_consumers, []},
+     {["consumers", vhost],                                        rabbit_mgmt_wm_consumers, []},
      {["exchanges"],                                               rabbit_mgmt_wm_exchanges, []},
      {["exchanges", vhost],                                        rabbit_mgmt_wm_exchanges, []},
      {["exchanges", vhost, exchange],                              rabbit_mgmt_wm_exchange, []},
index 5ecc38da47be90af471f668dd94767cb55820747..b558c4025b6b1408471318f3ff0955813a84c80b 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_extension).
index 487ff860e622c356b3e9c4d63f11218cff363ee4..b16d13136f0e22c7553ea333856870ca4c6a1360 100644 (file)
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_format).
 
 -export([format/2, print/2, remove/1, ip/1, ipb/1, amqp_table/1, tuple/1]).
--export([parameter/1, timestamp/1, timestamp_ms/1, strip_pids/1]).
+-export([parameter/1, now_to_str/1, now_to_str_ms/1, now_to_ms/1, strip_pids/1]).
 -export([node_from_pid/1, protocol/1, resource/1, queue/1, queue_state/1]).
 -export([exchange/1, user/1, internal_user/1, binding/1, url/2]).
 -export([pack_binding_props/2, tokenise/1]).
@@ -124,17 +124,22 @@ protocol_version({Major, Minor, 0})        -> protocol_version({Major, Minor});
 protocol_version({Major, Minor, Revision}) -> io_lib:format("~B-~B-~B",
                                                     [Major, Minor, Revision]).
 
-timestamp_ms(unknown) ->
+now_to_ms(unknown) ->
     unknown;
-timestamp_ms(Timestamp) ->
-    timer:now_diff(Timestamp, {0,0,0}) div 1000.
+now_to_ms(Now) ->
+    timer:now_diff(Now, {0,0,0}) div 1000.
 
-timestamp(unknown) ->
+now_to_str(unknown) ->
     unknown;
-timestamp(Timestamp) ->
-    {{Y, M, D}, {H, Min, S}} = calendar:now_to_local_time(Timestamp),
+now_to_str(Now) ->
+    {{Y, M, D}, {H, Min, S}} = calendar:now_to_local_time(Now),
     print("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w", [Y, M, D, H, Min, S]).
 
+now_to_str_ms(unknown) ->
+    unknown;
+now_to_str_ms(Now = {_, _, Micro}) ->
+    print("~s:~3.3.0w", [now_to_str(Now), Micro div 1000]).
+
 resource(unknown) -> unknown;
 resource(Res)     -> resource(name, Res).
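timestamp_ms/1 and timestamp/1 become now_to_ms/1 and now_to_str/1 (plus a millisecond string variant), and callers elsewhere in this change move from erlang:now/0 to os:timestamp/0, which yields the same {MegaSecs, Secs, MicroSecs} triple without the monotonicity bookkeeping. A quick sketch of the conversion with an illustrative timestamp:

    %% {MegaSecs, Secs, MicroSecs} to milliseconds since the Unix epoch,
    %% exactly what now_to_ms/1 computes.
    Now = {1438, 822800, 123456},
    timer:now_diff(Now, {0, 0, 0}) div 1000.
    %% => 1438822800123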
 
@@ -152,9 +157,8 @@ internal_user(User) ->
      {tags,          tags(User#internal_user.tags)}].
 
 user(User) ->
-    [{name,         User#user.username},
-     {tags,         tags(User#user.tags)},
-     {auth_backend, User#user.auth_backend}].
+    [{name, User#user.username},
+     {tags, tags(User#user.tags)}].
 
 tags(Tags) ->
     list_to_binary(string:join([atom_to_list(T) || T <- Tags], ",")).
@@ -227,14 +231,16 @@ queue(#amqqueue{name            = Name,
                 auto_delete     = AutoDelete,
                 exclusive_owner = ExclusiveOwner,
                 arguments       = Arguments,
-                pid             = Pid}) ->
+                pid             = Pid,
+                state           = State}) ->
     format(
       [{name,        Name},
        {durable,     Durable},
        {auto_delete, AutoDelete},
        {owner_pid,   ExclusiveOwner},
        {arguments,   Arguments},
-       {pid,         Pid}],
+       {pid,         Pid},
+       {state,       State}],
       [{fun resource/1,   [name]},
        {fun amqp_table/1, [arguments]},
        {fun policy/1,     [policy]}]).
index 95fadfeacff3d79db119cb4365e49f4f29b4456d..d12f545261dae14613085fabe6ce2d4b714bad08 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_load_definitions).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_reset_handler.erl b/rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_reset_handler.erl
new file mode 100644 (file)
index 0000000..e16351f
--- /dev/null
@@ -0,0 +1,86 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% When management extensions are enabled and/or disabled at runtime, the
+%% management web dispatch mechanism needs to be reset. This event handler
+%% deals with responding to 'plugins_changed' events for management
+%% extensions, forcing a reset when necessary.
+
+-module(rabbit_mgmt_reset_handler).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+         terminate/2, code_change/3]).
+
+-rabbit_boot_step({?MODULE,
+                   [{description, "management extension handling"},
+                    {mfa,         {gen_event, add_handler,
+                                   [rabbit_event, ?MODULE, []]}},
+                    {cleanup,     {gen_event, delete_handler,
+                                   [rabbit_event, ?MODULE, []]}},
+                    {requires,    rabbit_event},
+                    {enables,     recovery}]}).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+    {ok, []}.
+
+handle_call(_Request, State) ->
+    {ok, not_understood, State}.
+
+handle_event(#event{type = plugins_changed, props = Details}, State) ->
+    Enabled = pget(enabled, Details),
+    Disabled = pget(disabled, Details),
+    case extensions_changed(Enabled ++ Disabled) of
+        true  -> rabbit_mgmt_app:reset_dispatcher(Disabled);
+        false -> ok
+    end,
+    {ok, State};
+
+handle_event(_Event, State) ->
+    {ok, State}.
+
+handle_info(_Info, State) ->
+    {ok, State}.
+
+terminate(_Arg, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% We explicitly ignore the case where management has been
+%% started/stopped since the dispatcher is either freshly created or
+%% about to vanish.
+extensions_changed(Apps) ->
+    not lists:member(rabbitmq_management, Apps) andalso
+        lists:any(fun is_extension/1, [Mod || App <- Apps, Mod <- mods(App)]).
+
+is_extension(Mod) ->
+    lists:member(rabbit_mgmt_extension,
+                 pget(behaviour, Mod:module_info(attributes), [])).
+
+mods(App) ->
+    {ok, Modules} = application:get_key(App, modules),
+    Modules.
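For reference, the event this handler consumes presumably looks like the sketch below; the shape is inferred from the pget calls above rather than from the emitting side, and the plugin list is illustrative:

    #event{type  = plugins_changed,
           props = [{enabled,  [rabbitmq_shovel_management]},
                    {disabled, []}]}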
index 74a4cc9835a598517257f994c0d08d6aace2393c..3e0c8a261f0da45f4ca4a5e43c768787dfe4c77a 100644 (file)
@@ -45,7 +45,7 @@ record(TS, Diff, Stats = #stats{diffs = Diffs}) ->
 %%----------------------------------------------------------------------------
 
 format(no_range, #stats{diffs = Diffs, base = Base}, Interval) ->
-    Now = rabbit_mgmt_format:timestamp_ms(erlang:now()),
+    Now = rabbit_mgmt_format:now_to_ms(os:timestamp()),
     RangePoint = ((Now div Interval) * Interval) - Interval,
     Count = sum_entire_tree(gb_trees:iterator(Diffs), Base),
     {[{rate, format_rate(
index 008dd581cf2228f9d313e2ae5a112b19534495d8..992ff722629b0dfc7c0f1152844efa02fe42267d 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_sup).
index d83fb3b0d41424ed57bce775079bf5a5c67c14a5..6758a5d3e23177f6258acaee11d646950c82d327 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_sup_sup).
index ad4509276100509eca3e6fcf7a2c469e9092f6f9..2a151017339a360c8e1bc941cda43f498792ef0d 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_util).
@@ -28,8 +28,9 @@
 -export([with_channel/4, with_channel/5]).
 -export([props_to_method/2, props_to_method/4]).
 -export([all_or_one_vhost/2, http_to_amqp/5, reply/3, filter_vhost/3]).
--export([filter_conn_ch_list/3, filter_user/2, list_login_vhosts/1]).
--export([with_decode/5, decode/1, decode/2, redirect/2, args/1]).
+-export([filter_conn_ch_list/3, filter_user/2, list_login_vhosts/2]).
+-export([with_decode/5, decode/1, decode/2, redirect/2, set_resp_header/3,
+         args/1]).
 -export([reply_list/3, reply_list/4, sort_list/2, destination_type/1]).
 -export([post_respond/1, columns/1, is_monitor/1]).
 -export([list_visible_vhosts/1, b64decode_or_throw/1, no_range/0, range/1,
@@ -40,6 +41,9 @@
 -include("rabbit_mgmt.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 
+-include_lib("webmachine/include/wm_reqdata.hrl").
+-include_lib("webmachine/include/wm_reqstate.hrl").
+
 -define(FRAMING, rabbit_framing_amqp_0_9_1).
 
 %%--------------------------------------------------------------------
@@ -73,7 +77,7 @@ user_matches_vhost(ReqData, User) ->
     case vhost(ReqData) of
         not_found -> true;
         none      -> true;
-        V         -> lists:member(V, list_login_vhosts(User))
+        V         -> lists:member(V, list_login_vhosts(User, peersock(ReqData)))
     end.
 
 %% Used for connections / channels. A normal user can only see / delete
@@ -116,11 +120,7 @@ is_authorized(ReqData, Context, Username, Password, ErrorMsg, Fun) ->
              end,
     case rabbit_access_control:check_user_pass_login(Username, Password) of
         {ok, User = #user{tags = Tags}} ->
-            IPStr = wrq:peer(ReqData),
-            %% inet_parse:address/1 is an undocumented function but
-            %% exists in old versions of Erlang. inet:parse_address/1
-            %% is a documented wrapper round it but introduced in R16B.
-            {ok, IP} = inet_parse:address(IPStr),
+            IP = peer(ReqData),
             case rabbit_access_control:check_user_loopback(Username, IP) of
                 ok ->
                     case is_mgmt_user(Tags) of
@@ -137,12 +137,26 @@ is_authorized(ReqData, Context, Username, Password, ErrorMsg, Fun) ->
                 not_allowed ->
                     ErrFun(<<"User can only log in via localhost">>)
             end;
-        {refused, Msg, Args} ->
+        {refused, _Username, Msg, Args} ->
             rabbit_log:warning("HTTP access denied: ~s~n",
                                [rabbit_misc:format(Msg, Args)]),
             not_authorised(<<"Login failed">>, ReqData, Context)
     end.
 
+peer(ReqData) ->
+    {ok, {IP,_Port}} = peername(peersock(ReqData)),
+    IP.
+
+%% We can't use wrq:peer/1 because that trusts X-Forwarded-For.
+peersock(ReqData) ->
+    WMState = ReqData#wm_reqdata.wm_state,
+    WMState#wm_reqstate.socket.
+
+%% Like the one in rabbit_net, but we and webmachine have a different
+%% way of wrapping
+peername(Sock) when is_port(Sock) -> inet:peername(Sock);
+peername({ssl, SSL})              -> ssl:peername(SSL).
+
 vhost(ReqData) ->
     case id(vhost, ReqData) of
         none  -> none;
@@ -162,7 +176,7 @@ reply(Facts, ReqData, Context) ->
     reply0(extract_columns(Facts, ReqData), ReqData, Context).
 
 reply0(Facts, ReqData, Context) ->
-    ReqData1 = wrq:set_resp_header("Cache-Control", "no-cache", ReqData),
+    ReqData1 = set_resp_header("Cache-Control", "no-cache", ReqData),
     try
         {mochijson2:encode(Facts), ReqData1, Context}
     catch exit:{json_encode, E} ->
@@ -441,6 +455,8 @@ with_channel(VHost, ReqData,
             end;
         {error, {auth_failure, Msg}} ->
             not_authorised(Msg, ReqData, Context);
+        {error, access_refused} ->
+            not_authorised(<<"Access refused.">>, ReqData, Context);
         {error, {nodedown, N}} ->
             bad_request(
               list_to_binary(
@@ -459,8 +475,8 @@ all_or_one_vhost(ReqData, Fun) ->
         VHost     -> Fun(VHost)
     end.
 
-filter_vhost(List, _ReqData, Context) ->
-    VHosts = list_login_vhosts(Context#context.user),
+filter_vhost(List, ReqData, Context) ->
+    VHosts = list_login_vhosts(Context#context.user, peersock(ReqData)),
     [I || I <- List, lists:member(pget(vhost, I), VHosts)].
 
 filter_user(List, _ReqData, #context{user = User}) ->
@@ -482,8 +498,13 @@ filter_conn_ch_list(List, ReqData, Context) ->
 
 redirect(Location, ReqData) ->
     wrq:do_redirect(true,
-                    wrq:set_resp_header("Location",
-                                        binary_to_list(Location), ReqData)).
+                    set_resp_header("Location",
+                                    binary_to_list(Location), ReqData)).
+
+set_resp_header(K, V, ReqData) ->
+    wrq:set_resp_header(K, strip_crlf(V), ReqData).
+
+strip_crlf(Str) -> lists:append(string:tokens(Str, "\r\n")).
 
 args({struct, L}) -> args(L);
 args(L)           -> rabbit_mgmt_format:to_amqp_table(L).
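set_resp_header/3 above wraps wrq:set_resp_header/3 and strips CR/LF from the value, which blocks response header injection via crafted input such as redirect targets. A quick sketch of strip_crlf/1's effect:

    lists:append(string:tokens("evil\r\nSet-Cookie: x", "\r\n")).
    %% => "evilSet-Cookie: x"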
@@ -494,8 +515,8 @@ post_respond({true, ReqData, Context}) ->
 post_respond({{halt, Code}, ReqData, Context}) ->
     {{halt, Code}, ReqData, Context};
 post_respond({JSON, ReqData, Context}) ->
-    {true, wrq:set_resp_header(
-             "content-type", "application/json",
+    {true, set_resp_header(
+             "Content-Type", "application/json",
              wrq:append_to_response_body(JSON, ReqData)), Context}.
 
 is_admin(T)       -> intersects(T, [administrator]).
@@ -517,12 +538,12 @@ intersects(A, B) -> lists:any(fun(I) -> lists:member(I, B) end, A).
 list_visible_vhosts(User = #user{tags = Tags}) ->
     case is_monitor(Tags) of
         true  -> rabbit_vhost:list();
-        false -> list_login_vhosts(User)
+        false -> list_login_vhosts(User, undefined)
     end.
 
-list_login_vhosts(User) ->
+list_login_vhosts(User, Sock) ->
     [V || V <- rabbit_vhost:list(),
-          case catch rabbit_access_control:check_vhost_access(User, V) of
+          case catch rabbit_access_control:check_vhost_access(User, V, Sock) of
               ok -> true;
               _  -> false
           end].
@@ -536,25 +557,33 @@ b64decode_or_throw(B64) ->
             throw({error, {not_base64, B64}})
     end.
 
-no_range() -> {no_range, no_range, no_range}.
+no_range() -> {no_range, no_range, no_range, no_range}.
 
 %% Take floor on queries so we make sure we only return samples
 %% for which we've finished receiving events. Fixes the "drop at
 %% the end" problem.
 range(ReqData) -> {range("lengths",    fun floor/2, ReqData),
                    range("msg_rates",  fun floor/2, ReqData),
-                   range("data_rates", fun floor/2, ReqData)}.
+                   range("data_rates", fun floor/2, ReqData),
+                   range("node_stats", fun floor/2, ReqData)}.
 
 %% ...but if we know only one event could have contributed towards
 %% what we are interested in, then let's take the ceiling instead and
-%% get slightly fresher data.
+%% get slightly fresher data that will match up with any
+%% non-historical data we have (e.g. queue length vs queue messages in
+%% RAM, they should both come from the same snapshot or we might
+%% report more messages in RAM than total).
 %%
-%% Why does msg_rates still use floor/2? Because in the cases where we
-%% call this function (for connections and queues) the msg_rates are still
-%% aggregated even though the lengths and data rates aren't.
+%% However, we only do this for queue lengths since a) it's the only
+%% thing where this ends up being really glaring and b) for other
+%% numbers we care more about the rate than the absolute value, and if
+%% we use ceil() we stand a 50:50 chance of looking up the last sample
+%% in the range before we get it, and thus deriving an instantaneous
+%% rate of 0.0.
 range_ceil(ReqData) -> {range("lengths",    fun ceil/2,  ReqData),
                         range("msg_rates",  fun floor/2, ReqData),
-                        range("data_rates", fun ceil/2,  ReqData)}.
+                        range("data_rates", fun floor/2,  ReqData),
+                        range("node_stats", fun floor/2,  ReqData)}.
 
 range(Prefix, Round, ReqData) ->
     Age0 = int(Prefix ++ "_age", ReqData),
@@ -563,7 +592,7 @@ range(Prefix, Round, ReqData) ->
         is_integer(Age0) andalso is_integer(Incr0) ->
             Age = Age0 * 1000,
             Incr = Incr0 * 1000,
-            Now = rabbit_mgmt_format:timestamp_ms(erlang:now()),
+            Now = rabbit_mgmt_format:now_to_ms(os:timestamp()),
             Last = Round(Now, Incr),
             #range{first = (Last - Age),
                    last  = Last,
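With the extra slot, range/1 and range_ceil/1 now return {Lengths, MsgRates, DataRates, NodeStats} windows. Each is still driven by <prefix>_age and <prefix>_incr query parameters in seconds, so node-stats history requests would carry node_stats_age and node_stats_incr alongside the existing lengths_*, msg_rates_* and data_rates_* pairs; the parameter names follow from range/3 above, though the endpoints that accept them are outside this hunk.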
index 445eee59632293ea7505fa9ddf7b7691d914793f..fec619609a2eb66dcca7768723ebd641381310ad 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_aliveness_test).
index d79455845d36f0ff1ffb07aa536dfdcef476b239..16e42ae3b97e851d92344db5fa0f16496122a4aa 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_binding).
index c452038bbaa15b07c0abbe0cff9a61b646fc2d82..4f8077819491ff6d72ee5c6ba77ecaabcb7dd515 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_bindings).
@@ -81,8 +81,8 @@ accept_content(ReqData, {_Mode, Context}) ->
                         "/api/bindings/~s/e/~s/~s/~s/~s",
                         [VHost, Source, DestType, Dest,
                          rabbit_mgmt_format:pack_binding_props(Key, Args)]))),
-            ReqData2 = wrq:set_resp_header("Location", Loc, ReqData),
-            {true, ReqData2, Context2}
+            {true, rabbit_mgmt_util:set_resp_header("Location", Loc, ReqData),
+             Context2}
     end.
 
 is_authorized(ReqData, {Mode, Context}) ->
index 67e584f28957af4800c2ef0779906084ea708cc8..641043380c9b479e2a24c110b2e7490f21ef9ecc 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_channel).
index 0274c8d285f641e19290a3a83bd84a824ed14cc7..6675fa2977e70585b11c703c14a6d7d203991d16 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_channels).
index e3d2c43169115bfbf545a1440e71abf6e09acf4f..f74182175be1f37318058b8984d4dd3c26f0e0cd 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_cluster_name).
index 7f918b1e3aaf3ea4ab5e07f3471d6f6eb190c6a3..523bec2fa7202decf046c1ec8c713eecfce02575 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_connection).
index c15b977709ede4492a1e122d04af5cbbe03b8cb0..9becc91e49ef31caeda889bdd9e8a237858ce1ab 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_connection_channels).
index 85689eb3649459fc8d4baf92a7447509f6233cc4..1263d9103df7883f55e0a67af3f5f0047608f915 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_connections).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_consumers.erl b/rabbitmq-server/plugins-src/rabbitmq-management/src/rabbit_mgmt_wm_consumers.erl
new file mode 100644 (file)
index 0000000..3d791d0
--- /dev/null
@@ -0,0 +1,56 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Plugin.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+
+-module(rabbit_mgmt_wm_consumers).
+
+-export([init/1, to_json/2, content_types_provided/2, resource_exists/2,
+         is_authorized/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+   {[{"application/json", to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+    {case rabbit_mgmt_util:vhost(ReqData) of
+         vhost_not_found -> false;
+         _               -> true
+     end, ReqData, Context}.
+
+to_json(ReqData, Context = #context{user = User}) ->
+    Consumers = case rabbit_mgmt_util:vhost(ReqData) of
+                    none  -> rabbit_mgmt_db:get_all_consumers();
+                    VHost -> rabbit_mgmt_db:get_all_consumers(VHost)
+                end,
+    rabbit_mgmt_util:reply_list(
+      filter_user(Consumers, User), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+filter_user(List, #user{username = Username, tags = Tags}) ->
+    case rabbit_mgmt_util:is_monitor(Tags) of
+        true  -> List;
+        false -> [I || I <- List,
+                       pget(user, pget(channel_details, I)) == Username]
+    end.
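
A rough Python analogue of filter_user/2 in the new handler above, assuming that is_monitor/1 accepts the monitoring and administrator tags (names below are illustrative only): monitors see every consumer, other users only the consumers created on their own channels.

    def filter_consumers(consumers, username, tags):
        if {"monitoring", "administrator"} & set(tags):
            return consumers
        # channel_details -> user identifies the channel owner of each consumer
        return [c for c in consumers
                if c.get("channel_details", {}).get("user") == username]
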
index d62f7c8b3335c815fede8df3341774495c5d801f..a33d6a1cf16bd5f12e431bbc1cf448adef1cf851 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_definitions).
@@ -69,7 +69,7 @@ to_json(ReqData, Context) ->
          {bindings,    Bs}]),
       case wrq:get_qs_value("download", ReqData) of
           undefined -> ReqData;
-          Filename  -> wrq:set_resp_header(
+          Filename  -> rabbit_mgmt_util:set_resp_header(
                          "Content-Disposition",
                          "attachment; filename=" ++
                              mochiweb_util:unquote(Filename), ReqData)
index 711756ef597812f2bccd85b8a86ebfcc2457b368..374280ab7c00dcfc584a85ef4836460b919953fc 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_exchange).
@@ -55,9 +55,11 @@ accept_content(ReqData, Context) ->
       [{exchange, rabbit_mgmt_util:id(exchange, ReqData)}]).
 
 delete_resource(ReqData, Context) ->
+    IfUnused = "true" =:= wrq:get_qs_value("if-unused", ReqData),
     rabbit_mgmt_util:amqp_request(
       rabbit_mgmt_util:vhost(ReqData), ReqData, Context,
-      #'exchange.delete'{ exchange = id(ReqData) }).
+      #'exchange.delete'{exchange  = id(ReqData),
+                         if_unused = IfUnused}).
 
 is_authorized(ReqData, Context) ->
     rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
index e1cc29e605a7d605e39a602a623d048fed77f38b..467c055be76f3274f96893030083c810f3321c81 100644 (file)
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_exchange_publish).
 
 -export([init/1, resource_exists/2, post_is_create/2, is_authorized/2,
-         allowed_methods/2, process_post/2]).
+         allowed_methods/2,  content_types_provided/2, process_post/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
@@ -29,6 +29,9 @@ init(_Config) -> {ok, #context{}}.
 allowed_methods(ReqData, Context) ->
     {['POST'], ReqData, Context}.
 
+content_types_provided(ReqData, Context) ->
+   {[{"application/json", to_json}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_wm_exchange:exchange(ReqData) of
          not_found -> false;
index f5cdf7d7518e9333bf8d7235482570cb45b36dee..f1df46a0838c6bceda397d5221f32c073561cddc 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_exchanges).
index bb0f15e4c4739bf2fa7ba75fdf3e3d8aecb20789..b38bf1d795a436a103423c6e4f83500030aca167 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_extensions).
@@ -29,7 +29,7 @@ content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
 to_json(ReqData, Context) ->
-    Modules = rabbit_mgmt_dispatcher:modules(),
+    Modules = rabbit_mgmt_dispatcher:modules([]),
     rabbit_mgmt_util:reply(
       [Module:web_ui() || Module <- Modules], ReqData, Context).
 
index 27522f63eeb51bc27e2e4008eca38318327751c1..646d1838a0f10675e1fa0aff4f5103c66b5d3aa5 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_node).
@@ -45,19 +45,23 @@ is_authorized(ReqData, Context) ->
 %%--------------------------------------------------------------------
 
 node0(ReqData) ->
-    Name = list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))),
-    case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(),
-               proplists:get_value(name, N) == Name] of
+    Node = list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))),
+    case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+               proplists:get_value(name, N) == Node] of
         []     -> not_found;
-        [Node] -> augment(ReqData, Name, Node)
+        [Data] -> augment(ReqData, Node, Data)
     end.
 
-augment(ReqData, Name, Node) ->
-    case wrq:get_qs_value("memory", ReqData) of
-        "true" -> Mem = case rpc:call(Name, rabbit_vm, memory, [], infinity) of
+augment(ReqData, Node, Data) ->
+    lists:foldl(fun (Key, DataN) -> augment(Key, ReqData, Node, DataN) end,
+                Data, [memory, binary]).
+
+augment(Key, ReqData, Node, Data) ->
+    case wrq:get_qs_value(atom_to_list(Key), ReqData) of
+        "true" -> Res = case rpc:call(Node, rabbit_vm, Key, [], infinity) of
                             {badrpc, _} -> not_available;
-                            Memory      -> Memory
+                            Result      -> Result
                         end,
-                  [{memory, Mem} | Node];
-        _      -> Node
+                  [{Key, Res} | Data];
+        _      -> Data
     end.
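
The refactored augment above folds over the memory and binary keys, so a request such as /api/nodes/rabbit@host?memory=true&binary=true performs one rpc:call per requested key and prepends the result to the node's proplist. A hedged Python sketch of that fold; rpc_call stands in for the real RPC and is an assumption.

    def augment_node(query, node, data, rpc_call, keys=("memory", "binary")):
        for key in keys:
            if query.get(key) == "true":
                try:
                    value = rpc_call(node, key)   # e.g. rabbit_vm memory/binary
                except RuntimeError:              # stands in for {badrpc, _}
                    value = "not_available"
                data = [(key, value)] + data      # mirrors [{Key, Res} | Data]
        return data
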
index 0f5b5327c0a44fcdcdb9241261851725d8a44006..fb86e1efb3a56597301d9064b2326aecb3be8744 100644 (file)
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_nodes).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
--export([all_nodes/0]).
+-export([all_nodes/1, all_nodes_raw/0]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
@@ -31,18 +31,21 @@ content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
 to_json(ReqData, Context) ->
-    rabbit_mgmt_util:reply_list(all_nodes(), ReqData, Context).
+    rabbit_mgmt_util:reply_list(all_nodes(ReqData), ReqData, Context).
 
 is_authorized(ReqData, Context) ->
     rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
 
 %%--------------------------------------------------------------------
 
-all_nodes() ->
+all_nodes(ReqData) ->
+    rabbit_mgmt_db:augment_nodes(
+      all_nodes_raw(), rabbit_mgmt_util:range_ceil(ReqData)).
+
+all_nodes_raw() ->
     S = rabbit_mnesia:status(),
     Nodes = proplists:get_value(nodes, S),
     Types = proplists:get_keys(Nodes),
     Running = proplists:get_value(running_nodes, S),
-    rabbit_mgmt_db:augment_nodes(
-      [[{name, Node}, {type, Type}, {running, lists:member(Node, Running)}] ||
-          Type <- Types, Node <- proplists:get_value(Type, Nodes)]).
+    [[{name, Node}, {type, Type}, {running, lists:member(Node, Running)}] ||
+        Type <- Types, Node <- proplists:get_value(Type, Nodes)].
index db8f3361ce715d2177065675674cb984594c8f61..486016ca1777034965175c1e93fe419bf56f5c84 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_overview).
@@ -32,31 +32,31 @@ content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
 to_json(ReqData, Context = #context{user = User = #user{tags = Tags}}) ->
-    {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
+    {ok, RatesMode} = application:get_env(rabbitmq_management, rates_mode),
     %% NB: this duplicates what's in /nodes but we want a global idea
     %% of this. And /nodes is not accessible to non-monitor users.
     ExchangeTypes = rabbit_mgmt_external_stats:list_registry_plugins(exchange),
     Overview0 = [{management_version,  version(rabbitmq_management)},
-                 {statistics_level,    StatsLevel},
+                 {rates_mode,          RatesMode},
                  {exchange_types,      ExchangeTypes},
                  {rabbitmq_version,    version(rabbit)},
                  {cluster_name,        rabbit_nodes:cluster_name()},
-                 {erlang_version,      erl_version(otp_release)},
-                 {erlang_full_version, erl_version(system_version)}],
+                 {erlang_version,      erlang_version()},
+                 {erlang_full_version, erlang_full_version()}],
     Range = rabbit_mgmt_util:range(ReqData),
     Overview =
         case rabbit_mgmt_util:is_monitor(Tags) of
             true ->
                 Overview0 ++
-                    [{K, {struct, V}} ||
-                        {K, V} <- rabbit_mgmt_db:get_overview(Range)] ++
+                    [{K, maybe_struct(V)} ||
+                        {K,V} <- rabbit_mgmt_db:get_overview(Range)] ++
                     [{node,               node()},
                      {statistics_db_node, stats_db_node()},
                      {listeners,          listeners()},
-                     {contexts,           rabbit_web_dispatch_contexts()}];
+                     {contexts,           web_contexts(ReqData)}];
             _ ->
                 Overview0 ++
-                    [{K, {struct, V}} ||
+                    [{K, maybe_struct(V)} ||
                         {K, V} <- rabbit_mgmt_db:get_overview(User, Range)]
         end,
     rabbit_mgmt_util:reply(Overview, ReqData, Context).
@@ -82,16 +82,21 @@ listeners() ->
        || L <- rabbit_networking:active_listeners()],
       ["protocol", "port", "node"] ).
 
+maybe_struct(L) when is_list(L) -> {struct, L};
+maybe_struct(V)                 -> V.
+
 %%--------------------------------------------------------------------
 
-rabbit_web_dispatch_contexts() ->
+web_contexts(ReqData) ->
     rabbit_mgmt_util:sort_list(
       lists:append(
-        [rabbit_web_dispatch_contexts(N) || N <- rabbit_mgmt_wm_nodes:all_nodes()]),
+        [fmt_contexts(N) || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData)]),
       ["description", "port", "node"]).
 
-rabbit_web_dispatch_contexts(N) ->
+fmt_contexts(N) ->
     [[{node, pget(name, N)} | C] || C <- pget(contexts, N, [])].
 
-erl_version(K) ->
-    list_to_binary(string:strip(erlang:system_info(K), both, $\n)).
+erlang_version() -> list_to_binary(rabbit_misc:otp_release()).
+
+erlang_full_version() ->
+    list_to_binary(string:strip(erlang:system_info(system_version), both, $\n)).
index b2665420dd9bb7562e1dbf21253602f30bb3a0f9..5af5d38bf30bfaf21645db87460d6963649b103b 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_parameter).
index c2f1e1b7f0d4cd558b4d452baca52abaf79b9075..0664bafe181577b03695c7eebc27712eec8b4b8c 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_parameters).
index 55eca612990003313a13dd1a02d650f094ef51a0..ec1046b64f30ab3c95043b7c799bc5601fb1d3e9 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_permission).
index 456bbfb5287f0012313befa0a1f1d096f798546f..52f4771ef2e5decc3f8c12f79cccfedc6d2ab766 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_permissions).
index 34280338db4bb0baeb9f26174ed6fbc4f9fce6ea..df19045209c58a223b5d48f6afe785ce9946db40 100644 (file)
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_permissions_user).
 
--export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([init/1, to_json/2, content_types_provided/2, resource_exists/2,
+         is_authorized/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
@@ -29,6 +30,12 @@ init(_Config) -> {ok, #context{}}.
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+resource_exists(ReqData, Context) ->
+    {case rabbit_mgmt_wm_user:user(ReqData) of
+         {ok, _}    -> true;
+         {error, _} -> false
+     end, ReqData, Context}.
+
 to_json(ReqData, Context) ->
     User = rabbit_mgmt_util:id(user, ReqData),
     Perms = rabbit_auth_backend_internal:list_user_permissions(User),
index 7472e960c21519e6118cb77bfe48623d6f3d4f7e..062a902222b44c974be59ee840cd3691f4d8cb8c 100644 (file)
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_permissions_vhost).
 
--export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([init/1, to_json/2, content_types_provided/2, resource_exists/2,
+         is_authorized/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
@@ -29,6 +30,9 @@ init(_Config) -> {ok, #context{}}.
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+resource_exists(ReqData, Context) ->
+    {rabbit_vhost:exists(rabbit_mgmt_wm_vhost:id(ReqData)), ReqData, Context}.
+
 to_json(ReqData, Context) ->
     VHost = rabbit_mgmt_util:id(vhost, ReqData),
     Perms = rabbit_auth_backend_internal:list_vhost_permissions(VHost),
index 74c9ee2ad1c013c485594f58c04e4af68521fd00..1236bcdb80934641e88876749eef52a9f9118059 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_policies).
index b22c3697c2bed9af77d12e03d5254b058315a048..fa9e1aaa10d4b446a084067b9cf615f69140f8a3 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_policy).
index 4c5a5a8e6f70ecf5645c33d785bcf4e87f8badb0..fb7e8ab48c0c03134be5e8929a76aeb6f70a9375 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_queue).
@@ -58,7 +58,9 @@ delete_resource(ReqData, Context) ->
     rabbit_mgmt_util:amqp_request(
       rabbit_mgmt_util:vhost(ReqData),
       ReqData, Context,
-      #'queue.delete'{ queue = rabbit_mgmt_util:id(queue, ReqData) }).
+      #'queue.delete'{ queue     = rabbit_mgmt_util:id(queue, ReqData),
+                       if_empty  = qs_true("if-empty", ReqData),
+                       if_unused = qs_true("if-unused", ReqData) }).
 
 is_authorized(ReqData, Context) ->
     rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
@@ -78,3 +80,5 @@ queue(VHost, QName) ->
         {ok, Q}            -> rabbit_mgmt_format:queue(Q);
         {error, not_found} -> not_found
     end.
+
+qs_true(Key, ReqData) -> "true" =:= wrq:get_qs_value(Key, ReqData).
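
A hedged client-side sketch of the if-empty / if-unused flags added to queue and exchange deletion in the hunks above; host, port and credentials are placeholders, and the expected 204 / 400 responses match the if_empty_unused_test further below.

    import base64
    import urllib.request

    def api_delete(path, user="guest", password="guest",
                   host="localhost", port=15672):
        req = urllib.request.Request(
            "http://{0}:{1}/api{2}".format(host, port, path), method="DELETE")
        token = base64.b64encode(
            "{0}:{1}".format(user, password).encode()).decode()
        req.add_header("Authorization", "Basic " + token)
        # 204 on success; a 400 (raised as HTTPError here) means the queue is
        # not empty or the queue/exchange is still in use.
        return urllib.request.urlopen(req).status

    api_delete("/queues/%2f/test?if-empty=true")
    api_delete("/exchanges/%2f/test?if-unused=true")
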
index 16024579611a92d001d6c038b57e632ce2bef435..8998bfcc8145931ca59993dabf8cd42a9ab112bd 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_queue_get).
index 0d351133bcf84a77a5788a2570cf2b9b82ec8283..42aeb953212a120a3307a3c2685eec422fb73003 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_queue_purge).
index 58c3da0e19c288745cad1b8f556d46e6f96cd473..51265c7301d70c7f91e7702e1a25e11e1fd9d7ff 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_queues).
@@ -51,7 +51,12 @@ augmented(ReqData, Context) ->
         rabbit_mgmt_util:range_ceil(ReqData), basic)).
 
 basic(ReqData) ->
-    [rabbit_mgmt_format:queue(Q) || Q <- queues0(ReqData)].
+    [rabbit_mgmt_format:queue(Q) || Q <- queues0(ReqData)] ++
+        [rabbit_mgmt_format:queue(Q#amqqueue{state = down}) ||
+            Q <- down_queues(ReqData)].
 
 queues0(ReqData) ->
     rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list/1).
+
+down_queues(ReqData) ->
+    rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list_down/1).
index bf1c557605f64a5e47e19ab4622db88740f4f4b9..b8b65291b19a4217ff2b859ea9b309ff941fae8c 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_user).
@@ -19,7 +19,7 @@
 -export([init/1, resource_exists/2, to_json/2,
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
-         delete_resource/2, put_user/1]).
+         delete_resource/2, user/1, put_user/1]).
 
 -import(rabbit_misc, [pget/2]).
 
index 013561bb5952c9fce3af7d4f47c5f20653d81936..7bf07bf180094be68e30d0f75c450ccab2604a5b 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_users).
index 6c790641334eee7ab3659477e4442c4b03205a95..0908d90d48046a33a94e7223521bd8e8c28974fb 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_vhost).
@@ -19,7 +19,7 @@
 -export([init/1, resource_exists/2, to_json/2,
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
-         delete_resource/2, put_vhost/2]).
+         delete_resource/2, id/1, put_vhost/2]).
 
 -import(rabbit_misc, [pget/2]).
 
index 54bf9ff62144b942e580518713b50d0c6ae08e62..961b6f71b51027fe502285daed6727333b3aed2b 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_vhosts).
index 0d75b044e19f60204abab9243062d985360fabd9..564b394681039c55dc6ecc299dcbf651d8ffdca2 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Plugin.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_wm_whoami).
index 37831f86d5b418d518ec7931529bed75710a69eb..bf542776e129fecf5df2d8e4828b283cfed002bb 100644 (file)
@@ -7,8 +7,9 @@
   {env, [{listener,          [{port, 15672}]},
          {http_log_dir,      none},
          {load_definitions,  none},
+         {rates_mode,        basic},
          {sample_retention_policies,
-          %% List of {MaxAgeSecs, IfTimestampDivisibleBySecs}
+          %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
           [{global,   [{605, 5}, {3660, 60}, {29400, 600}, {86400, 1800}]},
            {basic,    [{605, 5}, {3600, 60}]},
            {detailed, [{10, 5}]}]}
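
Back-of-the-envelope arithmetic for the default retention tiers above (an illustration, not plugin code): each {MaxAgeInSeconds, SampleEveryNSeconds} pair keeps roughly MaxAge / Every samples per statistic.

    global_policy = [(605, 5), (3660, 60), (29400, 600), (86400, 1800)]
    for max_age, every in global_policy:
        print("one sample every {0}s for {1}s -> ~{2} samples kept".format(
            every, max_age, max_age // every))
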
index ef2f5be09f52c9a32bdb2c30d51651d2d1f32f1a..528ec33dc5bffdf3f31f657e005f6f4934b168bf 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_test_db).
@@ -204,11 +204,11 @@ delete_ch(Name, Timestamp) ->
     event(channel_closed, [{pid, pid_del(Name)}], Timestamp).
 
 event(Type, Stats, Timestamp) ->
-    gen_server:cast({global, rabbit_mgmt_db},
-                    {event, #event{type      = Type,
-                                   props     = Stats,
-                                   reference = none,
-                                   timestamp = sec_to_triple(Timestamp)}}).
+    ok = gen_server:call(rabbit_mgmt_db,
+                         {event, #event{type      = Type,
+                                        props     = Stats,
+                                        reference = none,
+                                        timestamp = sec_to_triple(Timestamp)}}).
 
 sec_to_triple(Sec) -> {Sec div 1000000, Sec rem 1000000, 0}.
 
@@ -218,7 +218,7 @@ sec_to_triple(Sec) -> {Sec div 1000000, Sec rem 1000000, 0}.
 
 range(F, L, I) ->
     R = #range{first = F * 1000, last = L * 1000, incr = I * 1000},
-    {R, R, R}.
+    {R, R, R, R}.
 
 get_x(Name, Range) ->
     [X] = rabbit_mgmt_db:augment_exchanges([x2(Name)], Range, full),
@@ -244,7 +244,7 @@ details0(R, AR, A, L) ->
      {avg_rate, AR},
      {avg,      A}].
 
-simple_details(Thing, N, {#range{first = First, last = Last}, _, _}) ->
+simple_details(Thing, N, {#range{first = First, last = Last}, _, _, _}) ->
     [{Thing, N},
      {atom_suffix(Thing, "_details"),
       details0(0.0, 0.0, N * 1.0, [{Last, N}, {First, N}])}].
index d7949091768d7c6c990ac053353aab876119f2c4..f56a330a7a55a4081bb746ed7a7a0851040fde6d 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_test_http).
@@ -174,6 +174,9 @@ permissions_list_test() ->
     2 = length(http_get("/users/myuser1/permissions")),
     1 = length(http_get("/users/myuser2/permissions")),
 
+    http_get("/users/notmyuser/permissions", ?NOT_FOUND),
+    http_get("/vhosts/notmyvhost/permissions", ?NOT_FOUND),
+
     http_delete("/users/myuser1", ?NO_CONTENT),
     http_delete("/users/myuser2", ?NO_CONTENT),
     http_delete("/vhosts/myvhost1", ?NO_CONTENT),
@@ -520,21 +523,25 @@ get_conn(Username, Password) ->
                    [LocalPort]),
     {Conn, ConnPath, ChPath, ConnChPath}.
 
-permissions_connection_channel_test() ->
+permissions_connection_channel_consumer_test() ->
     PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
     http_put("/users/user", [{password, <<"user">>},
                              {tags, <<"management">>}], ?NO_CONTENT),
     http_put("/permissions/%2f/user", PermArgs, ?NO_CONTENT),
     http_put("/users/monitor", [{password, <<"monitor">>},
-                            {tags, <<"monitoring">>}], ?NO_CONTENT),
+                                {tags, <<"monitoring">>}], ?NO_CONTENT),
     http_put("/permissions/%2f/monitor", PermArgs, ?NO_CONTENT),
+    http_put("/queues/%2f/test", [], ?NO_CONTENT),
+
     {Conn1, UserConn, UserCh, UserConnCh} = get_conn("user", "user"),
     {Conn2, MonConn, MonCh, MonConnCh} = get_conn("monitor", "monitor"),
     {Conn3, AdmConn, AdmCh, AdmConnCh} = get_conn("guest", "guest"),
-    {ok, _Ch1} = amqp_connection:open_channel(Conn1),
-    {ok, _Ch2} = amqp_connection:open_channel(Conn2),
-    {ok, _Ch3} = amqp_connection:open_channel(Conn3),
-
+    {ok, Ch1} = amqp_connection:open_channel(Conn1),
+    {ok, Ch2} = amqp_connection:open_channel(Conn2),
+    {ok, Ch3} = amqp_connection:open_channel(Conn3),
+    [amqp_channel:subscribe(
+       Ch, #'basic.consume'{queue = <<"test">>}, self()) ||
+        Ch <- [Ch1, Ch2, Ch3]],
     AssertLength = fun (Path, User, Len) ->
                            ?assertEqual(Len,
                                         length(http_get(Path, User, User, ?OK)))
@@ -543,7 +550,7 @@ permissions_connection_channel_test() ->
          AssertLength(P, "user", 1),
          AssertLength(P, "monitor", 3),
          AssertLength(P, "guest", 3)
-     end || P <- ["/connections", "/channels"]],
+     end || P <- ["/connections", "/channels", "/consumers", "/consumers/%2f"]],
 
     AssertRead = fun(Path, UserStatus) ->
                          http_get(Path, "user", "user", UserStatus),
@@ -573,6 +580,22 @@ permissions_connection_channel_test() ->
     http_delete("/users/monitor", ?NO_CONTENT),
     http_get("/connections/foo", ?NOT_FOUND),
     http_get("/channels/foo", ?NOT_FOUND),
+    http_delete("/queues/%2f/test", ?NO_CONTENT),
+    ok.
+
+consumers_test() ->
+    http_put("/queues/%2f/test", [], ?NO_CONTENT),
+    {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn("guest", "guest"),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    amqp_channel:subscribe(
+      Ch, #'basic.consume'{queue        = <<"test">>,
+                           no_ack       = false,
+                           consumer_tag = <<"my-ctag">> }, self()),
+    assert_list([[{exclusive,    false},
+                  {ack_required, true},
+                  {consumer_tag, <<"my-ctag">>}]], http_get("/consumers")),
+    amqp_connection:close(Conn),
+    http_delete("/queues/%2f/test", ?NO_CONTENT),
     ok.
 
 defs(Key, URI, CreateMethod, Args) ->
@@ -984,6 +1007,26 @@ publish_unrouted_test() ->
     ?assertEqual([{routed, false}],
                  http_post("/exchanges/%2f/amq.default/publish", Msg, ?OK)).
 
+if_empty_unused_test() ->
+    http_put("/exchanges/%2f/test", [], ?NO_CONTENT),
+    http_put("/queues/%2f/test", [], ?NO_CONTENT),
+    http_post("/bindings/%2f/e/test/q/test", [], ?CREATED),
+    http_post("/exchanges/%2f/amq.default/publish",
+              msg(<<"test">>, [], <<"Hello world">>), ?OK),
+    http_delete("/queues/%2f/test?if-empty=true", ?BAD_REQUEST),
+    http_delete("/exchanges/%2f/test?if-unused=true", ?BAD_REQUEST),
+    http_delete("/queues/%2f/test/contents", ?NO_CONTENT),
+
+    {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn("guest", "guest"),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue = <<"test">> }, self()),
+    http_delete("/queues/%2f/test?if-unused=true", ?BAD_REQUEST),
+    amqp_connection:close(Conn),
+
+    http_delete("/queues/%2f/test?if-empty=true", ?NO_CONTENT),
+    http_delete("/exchanges/%2f/test?if-unused=true", ?NO_CONTENT),
+    passed.
+
 parameters_test() ->
     rabbit_runtime_parameters_test:register(),
 
index a3ecc80f8badaca92c6685fb6dc44d54d5554d01..de71872788f15ae5cc1d7f2618e8a94469c27681 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ Management Console.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mgmt_test_unit).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-management/test/src/rabbitmqadmin-test-wrapper.sh b/rabbitmq-server/plugins-src/rabbitmq-management/test/src/rabbitmqadmin-test-wrapper.sh
new file mode 100755 (executable)
index 0000000..d684ec9
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh -e
+TWO=$(python2 -c 'import sys;print(sys.version_info[0])')
+THREE=$(python3 -c 'import sys;print(sys.version_info[0])')
+
+if [ $TWO != 2 ] ; then
+    echo Python 2 not found!
+    exit 1
+fi
+
+if [ $THREE != 3 ] ; then
+    echo Python 3 not found!
+    exit 1
+fi
+
+echo
+echo ----------------------
+echo Testing under Python 2
+echo ----------------------
+
+python2 $(dirname $0)/rabbitmqadmin-test.py
+
+echo
+echo ----------------------
+echo Testing under Python 3
+echo ----------------------
+
+python3 $(dirname $0)/rabbitmqadmin-test.py
index 8e1d3101a3f645067a251ec35c66e8edd1ebd37d..470af56551e23cc051d8ed649d81723ea18bdc8f 100755 (executable)
@@ -156,12 +156,12 @@ tracing: False
         self.run_success(['declare', 'queue', 'name=test'])
         self.run_success(['publish', 'routing_key=test', 'payload=test_1'])
         self.run_success(['publish', 'routing_key=test', 'payload=test_2'])
-        self.run_success(['publish', 'routing_key=test'], stdin='test_3')
+        self.run_success(['publish', 'routing_key=test'], stdin=b'test_3')
         self.assert_table([exp_msg('test', 2, False, 'test_1')], ['get', 'queue=test', 'requeue=false'])
         self.assert_table([exp_msg('test', 1, False, 'test_2')], ['get', 'queue=test', 'requeue=true'])
         self.assert_table([exp_msg('test', 1, True,  'test_2')], ['get', 'queue=test', 'requeue=false'])
         self.assert_table([exp_msg('test', 0, False, 'test_3')], ['get', 'queue=test', 'requeue=false'])
-        self.run_success(['publish', 'routing_key=test'], stdin='test_4')
+        self.run_success(['publish', 'routing_key=test'], stdin=b'test_4')
         filename = '/tmp/rabbitmq-test/get.txt'
         self.run_success(['get', 'queue=test', 'requeue=false', 'payload_file=' + filename])
         with open(filename) as f:
@@ -212,24 +212,28 @@ tracing: False
         args.extend(args0)
         self.assertEqual(expected, [l.split('\t') for l in self.admin(args)[0].splitlines()])
 
-    def admin(self, args, stdin=None):
-        return run('../../../bin/rabbitmqadmin', args, stdin)
+    def admin(self, args0, stdin=None):
+        args = ['python{0}'.format(sys.version_info[0]),
+                norm('../../../bin/rabbitmqadmin')]
+        args.extend(args0)
+        return run(args, stdin)
 
     def ctl(self, args0, stdin=None):
-        args = ['-n', 'rabbit-test']
+        args = [norm('../../../../rabbitmq-server/scripts/rabbitmqctl'), '-n', 'rabbit-test']
         args.extend(args0)
-        (stdout, ret) = run('../../../../rabbitmq-server/scripts/rabbitmqctl', args, stdin)
+        (stdout, ret) = run(args, stdin)
         if ret != 0:
             self.fail(stdout)
 
-def run(cmd, args, stdin):
-    path = os.path.normpath(os.path.join(os.getcwd(), sys.argv[0], cmd))
-    cmdline = [path]
-    cmdline.extend(args)
-    proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+def norm(cmd):
+    return os.path.normpath(os.path.join(os.getcwd(), sys.argv[0], cmd))
+
+def run(args, stdin):
+    proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     (stdout, stderr) = proc.communicate(stdin)
     returncode = proc.returncode
-    return (stdout + stderr, returncode)
+    res = stdout.decode('utf-8') + stderr.decode('utf-8')
+    return (res, returncode)
 
 def l(thing):
     return ['list', thing, 'name']
@@ -239,6 +243,8 @@ def exp_msg(key, count, redelivered, payload):
     return [key, '', str(count), payload, str(len(payload)), 'string', '', str(redelivered)]
 
 if __name__ == '__main__':
-    print "\nrabbitmqadmin tests\n===================\n"
+    print("\nrabbitmqadmin tests\n===================\n")
     suite = unittest.TestLoader().loadTestsFromTestCase(TestRabbitMQAdmin)
-    unittest.TextTestRunner(verbosity=2).run(suite)
+    results = unittest.TextTestRunner(verbosity=2).run(suite)
+    if not results.wasSuccessful():
+        sys.exit(1)
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-mqtt/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 8104c7981d59b97dbd71676db338e676656f722c..b620a31cd2b98e377670b00f4ae7d4c9540b2221 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -define(CLIENT_ID_MAXLEN, 23).
@@ -40,4 +40,5 @@
                       will_msg,
                       channels,
                       connection,
-                      exchange }).
+                      exchange,
+                      ssl_login_name }).
index 87f24d5d7732b59d5976ad6a8077b9dc5b66fba6..968f98641e915f72bbe118c0921352a563761b15 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -define(PROTOCOL_NAMES,  [{3, "MQIsdp"}, {4, "MQTT"}]).
index e564fab97cdab455a84b4d5153e0662a4f3e9175..c5d5bcc756469822f598ce3e6c8738124c6511e5 100644 (file)
@@ -1,5 +1,19 @@
 RELEASABLE:=true
-DEPS:=rabbitmq-erlang-client
-
-RABBITMQ_TEST_PATH=$(PACKAGE_DIR)/../../rabbitmq-test
+DEPS:=rabbitmq-server rabbitmq-erlang-client rabbitmq-test
 WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/test.sh
+WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/test/ebin/test
+WITH_BROKER_SETUP_SCRIPTS:=$(PACKAGE_DIR)/test/setup-rabbit-test.sh
+
+define package_rules
+
+$(PACKAGE_DIR)+pre-test::
+       rm -rf $(PACKAGE_DIR)/test/certs
+       mkdir $(PACKAGE_DIR)/test/certs
+       mkdir -p $(PACKAGE_DIR)/test/ebin
+       sed -E -e "s|%%CERTS_DIR%%|$(abspath $(PACKAGE_DIR))/test/certs|g" < $(PACKAGE_DIR)/test/src/test.config > $(PACKAGE_DIR)/test/ebin/test.config
+       $(MAKE) -C $(PACKAGE_DIR)/../rabbitmq-test/certs all PASSWORD=bunnychow DIR=$(abspath $(PACKAGE_DIR))/test/certs
+
+$(PACKAGE_DIR)+clean::
+       rm -rf $(PACKAGE_DIR)/test/certs
+
+endef
index 25e191ac037b63f537a502f65c2ecdc1dc0fc832..92c2916e3f9198f667ae5cb4927b1940c65282dc 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt).
index 26009ee405fdab07cf49d17329f5c34b48309163..52a3da9cf43574e35b12d1dd9cf65c74ccff5f4a 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt_collector).
index dd722c06baa4784ecd981f09471938b335e932e8..fd083a4cf6303d1a99665b91ce9cdcaa6fcdcf9c 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt_connection_sup).
index 39172c7b47d03b3d8618ff759b6414186a16dd95..628241144bac293ed8887779e1e490bd42536592 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt_frame).
index eb17673128f3e9c3faf2ca43aa8267f434bcfe7a..8ab736c458285610d84a75dde87a704da5d71870 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt_processor).
 
--export([info/2, initial_state/1,
+-export([info/2, initial_state/2,
          process_frame/2, amqp_pub/2, amqp_callback/2, send_will/1,
          close_connection/1]).
 
 -include("rabbit_mqtt_frame.hrl").
 -include("rabbit_mqtt.hrl").
 
+-define(APP, rabbitmq_mqtt).
 -define(FRAME_TYPE(Frame, Type),
         Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }}).
 
-initial_state(Socket) ->
+initial_state(Socket,SSLLoginName) ->
     #proc_state{ unacked_pubs  = gb_trees:empty(),
                  awaiting_ack  = gb_trees:empty(),
                  message_id    = 1,
@@ -35,7 +36,8 @@ initial_state(Socket) ->
                  consumer_tags = {undefined, undefined},
                  channels      = {undefined, undefined},
                  exchange      = rabbit_mqtt_util:env(exchange),
-                 socket        = Socket }.
+                 socket        = Socket,
+                 ssl_login_name = SSLLoginName }.
 
 info(client_id, #proc_state{ client_id = ClientId }) -> ClientId.
 
@@ -44,14 +46,8 @@ process_frame(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
   when Type =/= ?CONNECT ->
     {error, connect_expected, PState};
 process_frame(Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
-              PState ) ->
-    %%rabbit_log:info("MQTT received frame ~p ~n", [Frame]),
-    try process_request(Type, Frame, PState) of
-        Result -> Result
-    catch _:Error ->
-        close_connection(PState),
-        {error, Error}
-    end.
+              PState) ->
+    process_request(Type, Frame, PState).
 
 process_request(?CONNECT,
                 #mqtt_frame{ variable = #mqtt_frame_connect{
@@ -60,7 +56,8 @@ process_request(?CONNECT,
                                           proto_ver  = ProtoVersion,
                                           clean_sess = CleanSess,
                                           client_id  = ClientId0,
-                                          keep_alive = Keepalive} = Var}, PState) ->
+                                          keep_alive = Keepalive} = Var},
+                PState = #proc_state{ ssl_login_name = SSLLoginName }) ->
     ClientId = case ClientId0 of
                    []    -> rabbit_mqtt_util:gen_client_id();
                    [_|_] -> ClientId0
@@ -73,7 +70,7 @@ process_request(?CONNECT,
             {_, true} ->
                 {?CONNACK_INVALID_ID, PState};
             _ ->
-                case creds(Username, Password) of
+                case creds(Username, Password, SSLLoginName) of
                     nocreds ->
                         rabbit_log:error("MQTT login failed - no credentials~n"),
                         {?CONNACK_CREDENTIALS, PState};
@@ -82,6 +79,8 @@ process_request(?CONNECT,
                             {?CONNACK_ACCEPT, Conn} ->
                                 link(Conn),
                                 {ok, Ch} = amqp_connection:open_channel(Conn),
+                                link(Ch),
+                                amqp_channel:enable_delivery_flow_control(Ch),
                                 ok = rabbit_mqtt_collector:register(
                                   ClientId, self()),
                                 Prefetch = rabbit_mqtt_util:env(prefetch),
@@ -211,10 +210,12 @@ amqp_callback({#'basic.deliver'{ consumer_tag = ConsumerTag,
                                  delivery_tag = DeliveryTag,
                                  routing_key  = RoutingKey },
                #amqp_msg{ props = #'P_basic'{ headers = Headers },
-                          payload = Payload }} = Delivery,
+                          payload = Payload },
+               DeliveryCtx} = Delivery,
               #proc_state{ channels      = {Channel, _},
                            awaiting_ack  = Awaiting,
                            message_id    = MsgId } = PState) ->
+    amqp_channel:notify_received(DeliveryCtx),
     case {delivery_dup(Delivery), delivery_qos(ConsumerTag, Headers, PState)} of
         {true, {?QOS_0, ?QOS_1}} ->
             amqp_channel:cast(
@@ -278,7 +279,8 @@ amqp_callback(#'basic.ack'{ multiple = false, delivery_tag = Tag },
     {ok, PState #proc_state{ unacked_pubs = gb_trees:delete(Tag, UnackedPubs) }}.
 
 delivery_dup({#'basic.deliver'{ redelivered = Redelivered },
-              #amqp_msg{ props = #'P_basic'{ headers = Headers }}}) ->
+              #amqp_msg{ props = #'P_basic'{ headers = Headers }},
+              _DeliveryCtx}) ->
     case rabbit_mqtt_util:table_lookup(Headers, <<"x-mqtt-dup">>) of
         undefined   -> Redelivered;
         {bool, Dup} -> Redelivered orelse Dup
@@ -366,23 +368,27 @@ get_vhost_username(UserBin) ->
         [UserBin]         -> {rabbit_mqtt_util:env(vhost), UserBin}
     end.
 
-creds(User, Pass) ->
-    DefaultUser = rabbit_mqtt_util:env(default_user),
-    DefaultPass = rabbit_mqtt_util:env(default_pass),
-    Anon        = rabbit_mqtt_util:env(allow_anonymous),
-    U = case {User =/= undefined, is_binary(DefaultUser), Anon =:= true} of
-             {true,  _,    _   } -> list_to_binary(User);
-             {false, true, true} -> DefaultUser;
-             _                   -> nocreds
+creds(User, Pass, SSLLoginName) ->
+    DefaultUser   = rabbit_mqtt_util:env(default_user),
+    DefaultPass   = rabbit_mqtt_util:env(default_pass),
+    {ok, Anon}    = application:get_env(?APP, allow_anonymous),
+    {ok, TLSAuth} = application:get_env(?APP, ssl_cert_login),
+    U = case {User =/= undefined, is_binary(DefaultUser),
+              Anon =:= true, (TLSAuth andalso SSLLoginName =/= none)} of
+             {true,  _,    _,    _}     -> list_to_binary(User);
+             {false, _,    _,    true}  -> SSLLoginName;
+             {false, true, true, false} -> DefaultUser;
+             _                          -> nocreds
         end,
     case U of
         nocreds ->
             nocreds;
         _ ->
-            case {Pass =/= undefined, is_binary(DefaultPass), Anon =:= true} of
-                 {true,  _,    _   } -> {U, list_to_binary(Pass)};
-                 {false, true, true} -> {U, DefaultPass};
-                 _                   -> {U, none}
+            case {Pass =/= undefined, is_binary(DefaultPass), Anon =:= true, SSLLoginName == U} of
+                 {true,  _,    _,    _} -> {U, list_to_binary(Pass)};
+                 {false, _,    _,    _} -> {U, none};
+                 {false, true, true, _} -> {U, DefaultPass};
+                 _                      -> {U, none}
             end
     end.
 
@@ -489,7 +495,14 @@ amqp_pub(#mqtt_msg{ qos        = Qos,
 
 adapter_info(Sock, ProtoVer) ->
     amqp_connection:socket_adapter_info(
-             Sock, {'MQTT', integer_to_list(ProtoVer)}).
+             Sock, {'MQTT', human_readable_mqtt_version(ProtoVer)}).
+
+human_readable_mqtt_version(3) ->
+    "3.1.0";
+human_readable_mqtt_version(4) ->
+    "3.1.1";
+human_readable_mqtt_version(_) ->
+    "N/A".
 
 send_client(Frame, #proc_state{ socket = Sock }) ->
     %rabbit_log:info("MQTT sending frame ~p ~n", [Frame]),
@@ -508,4 +521,3 @@ close_connection(PState = #proc_state{ connection = Connection,
     catch amqp_connection:close(Connection),
     PState #proc_state{ channels   = {undefined, undefined},
                         connection = undefined }.
-
index d2d6b04d218e5021681090e5a603065ab0f76078..30cf03288a612dfab7ff95ef24c4213a0707c821 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt_reader).
@@ -52,7 +52,7 @@ handle_cast({go, Sock0, SockTransform, KeepaliveSup}, undefined) ->
                 {ok, Sock} ->
                     rabbit_alarm:register(
                       self(), {?MODULE, conserve_resources, []}),
-                    ProcessorState = rabbit_mqtt_processor:initial_state(Sock),
+                    ProcessorState = rabbit_mqtt_processor:initial_state(Sock,ssl_login_name(Sock)),
                     {noreply,
                      control_throttle(
                        #state{socket           = Sock,
@@ -90,9 +90,10 @@ handle_cast(duplicate_id,
 handle_cast(Msg, State) ->
     {stop, {mqtt_unexpected_cast, Msg}, State}.
 
-handle_info({#'basic.deliver'{}, #amqp_msg{}} = Delivery,
+handle_info({#'basic.deliver'{}, #amqp_msg{}, _DeliveryCtx} = Delivery,
             State = #state{ proc_state = ProcState }) ->
-    callback_reply(State, rabbit_mqtt_processor:amqp_callback(Delivery, ProcState));
+    callback_reply(State, rabbit_mqtt_processor:amqp_callback(Delivery,
+                                                              ProcState));
 
 handle_info(#'basic.ack'{} = Ack, State = #state{ proc_state = ProcState }) ->
     callback_reply(State, rabbit_mqtt_processor:amqp_callback(Ack, ProcState));
@@ -137,9 +138,10 @@ handle_info({start_keepalives, Keepalive},
                     KeepaliveSup, Sock, 0, SendFun, Keepalive, ReceiveFun),
     {noreply, State #state { keepalive = Heartbeater }};
 
-handle_info(keepalive_timeout, State = #state { conn_name = ConnStr }) ->
+handle_info(keepalive_timeout, State = #state {conn_name = ConnStr,
+                                               proc_state = PState}) ->
     log(error, "closing MQTT connection ~p (keepalive timeout)~n", [ConnStr]),
-    {stop, {shutdown, keepalive_timeout}, State};
+    send_will_and_terminate(PState, {shutdown, keepalive_timeout}, State);
 
 handle_info(Msg, State) ->
     {stop, {mqtt_unexpected_msg, Msg}, State}.
@@ -177,19 +179,30 @@ terminate({network_error, Reason, ConnStr}, _State) ->
 terminate({network_error, Reason}, _State) ->
     log(error, "MQTT detected network error: ~p~n", [Reason]);
 
-terminate(normal, State = #state{proc_state = ProcState,
-                                 conn_name  = ConnName}) ->
+terminate(normal, #state{proc_state = ProcState,
+                         conn_name  = ConnName}) ->
     rabbit_mqtt_processor:close_connection(ProcState),
     log(info, "closing MQTT connection ~p (~s)~n", [self(), ConnName]),
     ok;
 
-terminate(_Reason, State = #state{proc_state = ProcState}) ->
+terminate(_Reason, #state{proc_state = ProcState}) ->
     rabbit_mqtt_processor:close_connection(ProcState),
     ok.
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
+ssl_login_name(Sock) ->
+  case rabbit_net:peercert(Sock) of
+      {ok, C}              -> case rabbit_ssl:peer_cert_auth_name(C) of
+                                    unsafe    -> none;
+                                    not_found -> none;
+                                    Name      -> Name
+                                end;
+      {error, no_peercert} -> none;
+      nossl                -> none
+  end.
+
 %%----------------------------------------------------------------------------
 
 process_received_bytes(<<>>, State) ->
@@ -213,7 +226,7 @@ process_received_bytes(Bytes,
                                     proc_state = ProcState1 });
                 {error, Reason, ProcState1} ->
                     log(info, "MQTT protocol error ~p for connection ~p~n",
-                        [ConnStr, Reason]),
+                        [Reason, ConnStr]),
                     {stop, {shutdown, Reason}, pstate(State, ProcState1)};
                 {error, Error} ->
                     log(error, "MQTT detected framing error '~p' for connection ~p~n",
@@ -230,7 +243,7 @@ process_received_bytes(Bytes,
 
 callback_reply(State, {ok, ProcState}) ->
     {noreply, pstate(State, ProcState), hibernate};
-callback_reply(State, {err, Reason, ProcState}) ->
+callback_reply(State, {error, Reason, ProcState}) ->
     {stop, Reason, pstate(State, ProcState)}.
 
 start_keepalive(_,   0        ) -> ok;
@@ -241,13 +254,15 @@ pstate(State = #state {}, PState = #proc_state{}) ->
 
 %%----------------------------------------------------------------------------
 
-log(Level, Fmt)       -> rabbit_log:log(connection, Level, Fmt, []).
 log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).
 
 send_will_and_terminate(PState, State) ->
+    send_will_and_terminate(PState, {shutdown, conn_closed}, State).
+
+send_will_and_terminate(PState, Reason, State) ->
     rabbit_mqtt_processor:send_will(PState),
     % todo: flush channel after publish
-    {stop, {shutdown, conn_closed}, State}.
+    {stop, Reason, State}.
 
 network_error(closed,
               State = #state{ conn_name  = ConnStr,
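
Reviewer note: with the reader changes above, a keepalive timeout now publishes the client's Last Will before the process stops instead of terminating silently. A hedged sketch of that path, reusing log/3 and the send_will_and_terminate/3 helper from the hunks above (the wrapper function name is illustrative):

    handle_keepalive_timeout(ConnStr, PState, State) ->
        %% Log the timeout, publish the will, then stop with the same
        %% reason used by the handle_info(keepalive_timeout, ...) clause.
        log(error, "closing MQTT connection ~p (keepalive timeout)~n", [ConnStr]),
        send_will_and_terminate(PState, {shutdown, keepalive_timeout}, State).
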
index a2b3a2351410d3594b31ed4d7e932b6fde31b447..38edcf9659ebf9599bfb364a1da36e23b00b531b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt_sup).
 start_link(Listeners, []) ->
     supervisor2:start_link({local, ?MODULE}, ?MODULE, [Listeners]).
 
-init([{Listeners, SslListeners}]) ->
+init([{Listeners, SslListeners0}]) ->
     {ok, SocketOpts} = application:get_env(rabbitmq_mqtt, tcp_listen_options),
-    SslOpts = case SslListeners of
-                  [] -> none;
-                  _  -> rabbit_networking:ensure_ssl()
-              end,
+    {SslOpts, SslListeners}
+        = case SslListeners0 of
+              [] -> {none, []};
+              _  -> {rabbit_networking:ensure_ssl(),
+                     case rabbit_networking:poodle_check('MQTT') of
+                         ok     -> SslListeners0;
+                         danger -> []
+                     end}
+          end,
     {ok, {{one_for_all, 10, 10},
           [{collector,
             {rabbit_mqtt_collector, start_link, []},
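
Reviewer note: the supervisor hunk above gates the MQTT SSL listeners on rabbit_networking:poodle_check/1. A condensed sketch of the listener selection, assuming the two rabbit_networking calls behave as used in the patch (the function name is illustrative):

    %% Returns {SslOpts, SslListeners} for init/1: when the POODLE check
    %% reports danger, the SSL listeners are dropped and only the plain
    %% TCP listeners are started.
    ssl_listener_config([]) ->
        {none, []};
    ssl_listener_config(SslListeners0) ->
        SslOpts = rabbit_networking:ensure_ssl(),
        case rabbit_networking:poodle_check('MQTT') of
            ok     -> {SslOpts, SslListeners0};
            danger -> {SslOpts, []}
        end.
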
index c675ebdc1072b84523eee48411ecee373ebbc98e..9c1787a5898fa5d3c63f13ac0e6c21e4694065fd 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mqtt_util).
index b687c0a9927d7c5df6f35c13e6a29ee8eb54d591..6b60610af46b95f67cf0ee6b46acbc4bddd1e7c0 100644 (file)
@@ -6,6 +6,7 @@
   {mod, {rabbit_mqtt, []}},
   {env, [{default_user, <<"guest">>},
          {default_pass, <<"guest">>},
+         {ssl_cert_login,false},
          {allow_anonymous, true},
          {vhost, <<"/">>},
          {exchange, <<"amq.topic">>},
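
Reviewer note: the application env above gains an ssl_cert_login flag, defaulting to false. A hedged rabbitmq.config fragment showing how an operator would enable certificate-based MQTT logins; all values other than ssl_cert_login mirror the defaults shown above:

    [{rabbitmq_mqtt, [{default_user,    <<"guest">>},
                      {default_pass,    <<"guest">>},
                      {ssl_cert_login,  true},
                      {allow_anonymous, true},
                      {vhost,           <<"/">>},
                      {exchange,        <<"amq.topic">>}]}].
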
index 65382143763ce8880c1e352442baa20f0e51d96c..190e740f58d023f29193d2be144c92a9d75f1233 100644 (file)
@@ -11,21 +11,23 @@ JUNIT_JAR=../lib/junit.jar
 JAVA_AMQP_DIR=../../rabbitmq-java-client/
 JAVA_AMQP_CLASSES=$(JAVA_AMQP_DIR)build/classes/
 
-TEST_SRCS:=$(shell find $(TEST_SRC) -name '*.java')
 ALL_CLASSES:=$(foreach f,$(shell find src -name '*.class'),'$(f)')
-TEST_CLASSES:=$(TEST_SRCS:.java=.class)
 CP:=$(PAHO_JAR):$(JUNIT_JAR):$(TEST_SRC):$(JAVA_AMQP_CLASSES)
 
+HOSTNAME:=$(shell hostname)
+
 define class_from_path
 $(subst .class,,$(subst src.,,$(subst /,.,$(1))))
 endef
 
 .PHONY: test
-test: $(TEST_CLASSES) build_java_amqp
-       $(foreach test,$(TEST_CLASSES),CLASSPATH=$(CP) java  junit.textui.TestRunner -text $(call class_from_path,$(test)))
+test: build_java_amqp
+       ant test -Dhostname=$(HOSTNAME)
 
 clean:
-       rm -rf $(PAHO_JAR) $(ALL_CLASSES)
+       ant clean
+       rm -rf test_client
+
 
 distclean: clean
        rm -rf $(CHECKOUT_DIR)
@@ -34,13 +36,7 @@ $(CHECKOUT_DIR):
        git clone $(UPSTREAM_GIT) $@
        (cd $@ && git checkout $(REVISION)) || rm -rf $@
 
-$(PAHO_JAR): $(CHECKOUT_DIR)
-       ant -buildfile $</org.eclipse.paho.client.mqttv3/build.xml \
-       -Dship.folder=. -Dmqttv3-client-jar=$(PAHO_JAR_NAME) full
-
-%.class: %.java $(PAHO_JAR) $(JUNIT_JAR)
-       $(JC) -cp $(CP) $<
 
 .PHONY: build_java_amqp
-build_java_amqp:
-       make -C $(JAVA_AMQP_DIR)
+build_java_amqp: $(CHECKOUT_DIR)
+       $(MAKE) -C $(JAVA_AMQP_DIR) jar
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/build.properties b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/build.properties
new file mode 100644 (file)
index 0000000..25da28c
--- /dev/null
@@ -0,0 +1,17 @@
+build.out=build
+test.resources=${build.out}/test/resources
+javac.debug=true
+test.javac.out=${build.out}/test/classes
+test.resources=${build.out}/test/resources
+test.src.home=src
+certs.dir=certs
+certs.password=test
+server.keystore=${test.resources}/server.jks
+server.cert=${certs.dir}/server/cert.pem
+ca.cert=${certs.dir}/testca/cacert.pem
+server.keystore.phrase=bunnyhop
+
+client.keystore=${test.resources}/client.jks
+client.keystore.phrase=bunnychow
+client.srckeystore=${certs.dir}/client/keycert.p12
+client.srckeystore.password=bunnychow
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/build.xml b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/build.xml
new file mode 100644 (file)
index 0000000..1f80b16
--- /dev/null
@@ -0,0 +1,150 @@
+<?xml version="1.0"?>
+<project name="MQTT Java Test client" default="build">
+
+  <property name="output.folder" value="./target/work" />
+  <property name="ship.folder" value="./" />
+
+  <property file="build.properties"/>
+
+  <property name="java-amqp-client-path" location="../../rabbitmq-java-client" />
+
+  <path id="test.javac.classpath">
+    <!-- cf dist target, infra -->
+    <fileset dir="lib">
+      <include name="**/*.jar"/>
+    </fileset>
+    <fileset dir="test_client">
+      <include name="**/*.jar"/>
+    </fileset>
+    <fileset dir="${java-amqp-client-path}">
+      <include name="**/rabbitmq-client.jar" />
+    </fileset>
+  </path>
+
+  <target name="clean-paho" description="Clean compiled Eclipe Paho Test Client jars" >
+    <ant antfile="test_client/org.eclipse.paho.client.mqttv3/build.xml" useNativeBasedir="true" target="clean"/>
+  </target>
+
+  <target name="clean"  >
+    <delete dir="${build.out}"/>
+  </target>
+
+  <target name="build-paho" depends="clean-paho" description="Build the Eclipse Paho Test Client">
+    <ant antfile="test_client/org.eclipse.paho.client.mqttv3/build.xml" useNativeBasedir="true" />
+  </target>
+
+  <target name="detect-ssl">
+    <available property="SSL_AVAILABLE" file="${certs.dir}/client"/>
+    <property name="CLIENT_KEYSTORE_PHRASE" value="bunnies"/>
+    <property name="SSL_P12_PASSWORD" value="${certs.password}"/>
+  </target>
+
+  <target name="detect-tmpdir">
+    <property environment="env"/>
+    <condition property="TMPDIR" value="${env.TMPDIR}" else="/tmp">
+      <available file="${env.TMPDIR}" type="dir"/>
+    </condition>
+  </target>
+
+  <target name="make-server-keystore" if="SSL_AVAILABLE" depends="detect-ssl, detect-tmpdir">
+    <mkdir dir="${test.resources}"/>
+    <exec executable="keytool" failonerror="true" osfamily="unix">
+      <arg line="-import"/>
+      <arg value="-alias"/>
+      <arg value="server1"/>
+      <arg value="-file"/>
+      <arg value="${server.cert}"/>
+      <arg value="-keystore"/>
+      <arg value="${server.keystore}"/>
+      <arg value="-noprompt"/>
+      <arg value="-storepass"/>
+      <arg value="${server.keystore.phrase}"/>
+    </exec>
+    <exec executable="keytool" failonerror="true" osfamily="unix">
+          <arg line="-import"/>
+          <arg value="-alias"/>
+          <arg value="testca"/>
+          <arg value="-trustcacerts"/>
+          <arg value="-file"/>
+          <arg value="${ca.cert}"/>
+          <arg value="-keystore"/>
+          <arg value="${server.keystore}"/>
+          <arg value="-noprompt"/>
+          <arg value="-storepass"/>
+          <arg value="${server.keystore.phrase}"/>
+        </exec>
+  </target>
+
+ <target name="make-client-keystore" if="SSL_AVAILABLE" depends="detect-ssl, detect-tmpdir">
+   <mkdir dir="${test.resources}"/>
+     <exec executable="keytool" failonerror="true" osfamily="unix">
+       <arg line="-importkeystore"/>
+       <arg line="-srckeystore" />
+       <arg line="${client.srckeystore}" />
+       <arg value="-srcstoretype"/>
+       <arg value="PKCS12"/>
+       <arg value="-srcstorepass"/>
+       <arg value="${client.srckeystore.password}"/>
+       <arg value="-destkeystore"/>
+       <arg value="${client.keystore}"/>
+       <arg value="-deststoretype"/>
+       <arg value="JKS"/>
+       <arg value="-noprompt"/>
+       <arg value="-storepass"/>
+      <arg value="${client.keystore.phrase}"/>
+    </exec>
+  </target>
+
+  <target name="test-build" depends="clean,build-paho">
+    <mkdir dir="${test.javac.out}"/>
+
+    <javac srcdir="./src"
+      destdir="${test.javac.out}"
+      debug="on"
+      includeantruntime="false" >
+      <classpath>
+        <path refid="test.javac.classpath"/>
+      </classpath>
+    </javac>
+  </target>
+
+  <target name="test-ssl" depends="test-build, make-server-keystore, make-client-keystore" if="SSL_AVAILABLE">
+    <junit printSummary="withOutAndErr"
+      haltOnFailure="true"
+      failureproperty="test.failure"
+      fork="yes">
+      <classpath>
+        <path refid="test.javac.classpath"/>
+        <pathelement path="${test.javac.out}"/>
+        <pathelement path="${test.resources}"/>
+      </classpath>
+      <jvmarg value="-Dhostname=${hostname}"/>
+      <jvmarg value="-Dserver.keystore.passwd=${server.keystore.phrase}"/>
+      <jvmarg value="-Dclient.keystore.passwd=${client.keystore.phrase}"/>
+      <formatter type="plain"/>
+      <formatter type="xml"/>
+      <test todir="${build.out}" name="com.rabbitmq.mqtt.test.tls.MqttSSLTest"/>
+    </junit>
+  </target>
+
+  <target name="test-server" depends="test-build">
+    <junit printSummary="withOutAndErr"
+        haltOnFailure="true"
+        failureproperty="test.failure"
+        fork="yes">
+        <classpath>
+          <path refid="test.javac.classpath"/>
+          <pathelement path="${test.javac.out}"/>
+        </classpath>
+
+      <formatter type="plain"/>
+      <formatter type="xml"/>
+      <test todir="${build.out}" name="com.rabbitmq.mqtt.test.MqttTest"/>
+    </junit>
+  </target>
+
+  <target name="test" depends="test-server, test-ssl" description="Build the test mqtt client libraries.">
+
+  </target>
+
+</project>
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/rabbit-test.sh b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/rabbit-test.sh
new file mode 100755 (executable)
index 0000000..b0c6585
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+CTL=$1
+USER="O=client,CN=$(hostname)"
+
+$CTL add_user "$USER" ''
+$CTL set_permissions -p / "$USER" ".*" ".*" ".*"
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/setup-rabbit-test.sh b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/setup-rabbit-test.sh
new file mode 100755 (executable)
index 0000000..9b2708a
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh -e
+sh -e `dirname $0`/rabbit-test.sh "`dirname $0`/../../rabbitmq-server/scripts/rabbitmqctl -n rabbit-test"
index 0453c915295f72df52bbb4527f73055653e2704a..b5a491375781feda8c59edccd7ce263199fff77a 100644 (file)
@@ -11,7 +11,7 @@
 //  The Original Code is RabbitMQ.
 //
 //  The Initial Developer of the Original Code is GoPivotal, Inc.
-//  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+//  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 //
 
 package com.rabbitmq.mqtt.test;
@@ -40,6 +40,7 @@ import java.net.Socket;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 /***
  *  MQTT v3.1 tests
@@ -110,7 +111,7 @@ public class MqttTest extends TestCase implements MqttCallback {
         } catch (Exception _) {}
     }
 
-    private void setUpAmqp() throws IOException {
+    private void setUpAmqp() throws IOException, TimeoutException {
         connectionFactory = new ConnectionFactory();
         connectionFactory.setHost(host);
         conn = connectionFactory.newConnection();
@@ -377,7 +378,7 @@ public class MqttTest extends TestCase implements MqttCallback {
         }
     }
 
-    public void testInteropM2A() throws MqttException, IOException, InterruptedException {
+    public void testInteropM2A() throws MqttException, IOException, InterruptedException, TimeoutException {
         setUpAmqp();
         String queue = ch.queueDeclare().getQueue();
         ch.queueBind(queue, "amq.topic", topic);
@@ -393,7 +394,7 @@ public class MqttTest extends TestCase implements MqttCallback {
         tearDownAmqp();
     }
 
-    public void testInteropA2M() throws MqttException, IOException, InterruptedException {
+    public void testInteropA2M() throws MqttException, IOException, InterruptedException, TimeoutException {
         client.connect(conOpt);
         client.setCallback(this);
         client.subscribe(topic, 1);
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/rabbit-test.sh b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/rabbit-test.sh
new file mode 100644 (file)
index 0000000..3601b4c
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/sh
+CTL=$1
+USER="O=client,CN=$(hostname)"
+
+# Test direct connections
+$CTL add_user "$USER" ''
+$CTL set_permissions -p / "$USER" ".*" ".*" ".*"
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/setup-rabbit-test.sh b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/setup-rabbit-test.sh
new file mode 100644 (file)
index 0000000..9b2708a
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh -e
+sh -e `dirname $0`/rabbit-test.sh "`dirname $0`/../../rabbitmq-server/scripts/rabbitmqctl -n rabbit-test"
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java
new file mode 100644 (file)
index 0000000..f89d963
--- /dev/null
@@ -0,0 +1,166 @@
+//  The contents of this file are subject to the Mozilla Public License
+//  Version 1.1 (the "License"); you may not use this file except in
+//  compliance with the License. You may obtain a copy of the License
+//  at http://www.mozilla.org/MPL/
+//
+//  Software distributed under the License is distributed on an "AS IS"
+//  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+//  the License for the specific language governing rights and
+//  limitations under the License.
+//
+//  The Original Code is RabbitMQ.
+//
+//  The Initial Developer of the Original Code is GoPivotal, Inc.
+//  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+//
+
+package com.rabbitmq.mqtt.test.tls;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.eclipse.paho.client.mqttv3.IMqttDeliveryToken;
+import org.eclipse.paho.client.mqttv3.MqttCallback;
+import org.eclipse.paho.client.mqttv3.MqttClient;
+import org.eclipse.paho.client.mqttv3.MqttConnectOptions;
+import org.eclipse.paho.client.mqttv3.MqttException;
+import org.eclipse.paho.client.mqttv3.MqttMessage;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+
+/**
+ * MQTT v3.1 tests
+ * TODO: synchronise access to variables
+ */
+
+public class MqttSSLTest extends TestCase implements MqttCallback {
+
+    private final int port = 8883;
+    private final String brokerUrl = "ssl://" + getHost() + ":" + port;
+    private String clientId;
+    private String clientId2;
+    private MqttClient client;
+    private MqttClient client2;
+    private MqttConnectOptions conOpt;
+    private ArrayList<MqttMessage> receivedMessages;
+
+    private long lastReceipt;
+    private boolean expectConnectionFailure;
+
+
+    private static final String getHost() {
+        Object host = System.getProperty("hostname");
+        assertNotNull(host);
+        return host.toString();
+    }
+
+    // override 10s limit
+    private class MyConnOpts extends MqttConnectOptions {
+        private int keepAliveInterval = 60;
+
+        @Override
+        public void setKeepAliveInterval(int keepAliveInterval) {
+            this.keepAliveInterval = keepAliveInterval;
+        }
+
+        @Override
+        public int getKeepAliveInterval() {
+            return keepAliveInterval;
+        }
+    }
+
+
+    @Override
+    public void setUp() throws MqttException, IOException {
+        clientId = getClass().getSimpleName() + ((int) (10000 * Math.random()));
+        clientId2 = clientId + "-2";
+        client = new MqttClient(brokerUrl, clientId, null);
+        client2 = new MqttClient(brokerUrl, clientId2, null);
+        conOpt = new MyConnOpts();
+        conOpt.setSocketFactory(MutualAuth.getSSLContextWithoutCert().getSocketFactory());
+        setConOpts(conOpt);
+        receivedMessages = new ArrayList<MqttMessage>();
+        expectConnectionFailure = false;
+    }
+
+    @Override
+    public void tearDown() throws MqttException {
+        // clean any sticky sessions
+        setConOpts(conOpt);
+        client = new MqttClient(brokerUrl, clientId, null);
+        try {
+            client.connect(conOpt);
+            client.disconnect();
+        } catch (Exception _) {
+        }
+
+        client2 = new MqttClient(brokerUrl, clientId2, null);
+        try {
+            client2.connect(conOpt);
+            client2.disconnect();
+        } catch (Exception _) {
+        }
+    }
+
+
+    private void setConOpts(MqttConnectOptions conOpts) {
+        // provide authentication if the broker needs it
+        // conOpts.setUserName("guest");
+        // conOpts.setPassword("guest".toCharArray());
+        conOpts.setCleanSession(true);
+        conOpts.setKeepAliveInterval(60);
+    }
+
+    public void testCertLogin() throws MqttException {
+        try {
+            conOpt.setSocketFactory(MutualAuth.getSSLContextWithClientCert().getSocketFactory());
+            client.connect(conOpt);
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Exception: " + e.getMessage());
+        }
+    }
+
+
+    public void testInvalidUser() throws MqttException {
+        conOpt.setUserName("invalid-user");
+        try {
+            client.connect(conOpt);
+            fail("Authentication failure expected");
+        } catch (MqttException ex) {
+            Assert.assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Exception: " + e.getMessage());
+        }
+    }
+
+    public void testInvalidPassword() throws MqttException {
+        conOpt.setUserName("invalid-user");
+        conOpt.setPassword("invalid-password".toCharArray());
+        try {
+            client.connect(conOpt);
+            fail("Authentication failure expected");
+        } catch (MqttException ex) {
+            Assert.assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Exception: " + e.getMessage());
+        }
+    }
+
+
+    public void connectionLost(Throwable cause) {
+        if (!expectConnectionFailure)
+            fail("Connection unexpectedly lost");
+    }
+
+    public void messageArrived(String topic, MqttMessage message) throws Exception {
+        lastReceipt = System.currentTimeMillis();
+        receivedMessages.add(message);
+    }
+
+    public void deliveryComplete(IMqttDeliveryToken token) {
+    }
+}
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/tls/MutualAuth.java b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/com/rabbitmq/mqtt/test/tls/MutualAuth.java
new file mode 100644 (file)
index 0000000..a2d5d25
--- /dev/null
@@ -0,0 +1,84 @@
+package com.rabbitmq.mqtt.test.tls;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManagerFactory;
+import java.io.IOException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.CertificateException;
+import java.util.Arrays;
+import java.util.List;
+
+public class MutualAuth {
+
+    private MutualAuth() {
+
+    }
+
+    private static String getStringProperty(String propertyName) throws IllegalArgumentException {
+        Object value = System.getProperty(propertyName);
+        if (value == null) throw new IllegalArgumentException("Property: " + propertyName + " not found");
+        return value.toString();
+    }
+
+    private static TrustManagerFactory getServerTrustManagerFactory() throws NoSuchAlgorithmException, CertificateException, IOException, KeyStoreException {
+        char[] trustPhrase = getStringProperty("server.keystore.passwd").toCharArray();
+        MutualAuth dummy = new MutualAuth();
+
+        // Server TrustStore
+        KeyStore tks = KeyStore.getInstance("JKS");
+        tks.load(dummy.getClass().getResourceAsStream("/server.jks"), trustPhrase);
+
+        TrustManagerFactory tmf = TrustManagerFactory.getInstance("X509");
+        tmf.init(tks);
+
+        return tmf;
+    }
+
+    public static SSLContext getSSLContextWithClientCert() throws IOException {
+
+        char[] clientPhrase = getStringProperty("client.keystore.passwd").toCharArray();
+
+        MutualAuth dummy = new MutualAuth();
+        try {
+            SSLContext sslContext = getVanillaSSLContext();
+            // Client Keystore
+            KeyStore ks = KeyStore.getInstance("JKS");
+            ks.load(dummy.getClass().getResourceAsStream("/client.jks"), clientPhrase);
+            KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
+            kmf.init(ks, clientPhrase);
+
+            sslContext.init(kmf.getKeyManagers(), getServerTrustManagerFactory().getTrustManagers(), null);
+            return sslContext;
+        } catch (Exception e) {
+            throw new IOException(e);
+        }
+
+    }
+
+    private static SSLContext getVanillaSSLContext() throws NoSuchAlgorithmException {
+        SSLContext result = null;
+        List<String> xs = Arrays.asList("TLSv1.2", "TLSv1.1", "TLSv1");
+        for(String x : xs) {
+            try {
+                return SSLContext.getInstance(x);
+            } catch (NoSuchAlgorithmException nae) {
+                // keep trying
+            }
+        }
+        throw new NoSuchAlgorithmException("Could not obtain an SSLContext for TLS 1.0-1.2");
+    }
+
+    public static SSLContext getSSLContextWithoutCert() throws IOException {
+        try {
+            SSLContext sslContext = getVanillaSSLContext();
+            sslContext.init(null, getServerTrustManagerFactory().getTrustManagers(), null);
+            return sslContext;
+        } catch (Exception e) {
+            throw new IOException(e);
+        }
+    }
+
+}
diff --git a/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/test.config b/rabbitmq-server/plugins-src/rabbitmq-mqtt/test/src/test.config
new file mode 100644 (file)
index 0000000..3d6baff
--- /dev/null
@@ -0,0 +1,14 @@
+[{rabbitmq_mqtt, [
+   {ssl_cert_login,   true},
+   {allow_anonymous,  true},
+   {tcp_listeners,    [1883]},
+   {ssl_listeners,    [8883]}
+   ]},
+ {rabbit, [{ssl_options, [{cacertfile,"%%CERTS_DIR%%/testca/cacert.pem"},
+                          {certfile,"%%CERTS_DIR%%/server/cert.pem"},
+                          {keyfile,"%%CERTS_DIR%%/server/key.pem"},
+                          {verify,verify_peer},
+                          {fail_if_no_peer_cert,false}
+                         ]}
+          ]}
+].
index 313aeb5a4754c2464437af06b15519483ae1c6b9..ae60a49c629202e96834db95417cc17f1ded403a 100755 (executable)
@@ -1,3 +1,3 @@
 #!/bin/sh
-make -C `dirname $0` build_java_amqp
-make -C `dirname $0` test
+$MAKE -C `dirname $0` build_java_amqp
+$MAKE -C `dirname $0` test
diff --git a/rabbitmq-server/plugins-src/rabbitmq-shovel-management/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-shovel-management/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 42cad360c6957b5d4febd8aedd20991572431ee8..13827d67db42bdb4d946e14a64a884422b7a0c4e 100644 (file)
@@ -18,7 +18,14 @@ dispatcher_add(function(sammy) {
             var num_keys = ['prefetch-count', 'reconnect-delay'];
             var bool_keys = ['add-forward-headers'];
             var arrayable_keys = ['src-uri', 'dest-uri'];
+            var redirect = this.params['redirect'];
+            if (redirect != undefined) {
+                delete this.params['redirect'];
+            }
             put_parameter(this, [], num_keys, bool_keys, arrayable_keys);
+            if (redirect != undefined) {
+                go_to(redirect);
+            }
             return false;
         });
     sammy.del('#/shovel-parameters', function() {
@@ -66,7 +73,7 @@ HELP['shovel-delete-after'] =
 </dl>';
 
 function link_shovel(vhost, name) {
-    return _link_to(fmt_escape_html(name), '#/dynamic-shovels/' + esc(vhost) + '/' + esc(name));
+    return _link_to(name, '#/dynamic-shovels/' + esc(vhost) + '/' + esc(name));
 }
 
 function fmt_shovel_endpoint(prefix, shovel) {
index 61b4aa000464bc63728837ba55bfe99d751ec91e..9f7759ba25a8ccd07a9ba469fb2fe5fdf52c801f 100644 (file)
@@ -1,13 +1,9 @@
-<h1>Dynamic Shovel: <b><%= fmt_string(shovel.name) %></b></h1>
+<h1>Dynamic Shovel: <b><%= fmt_string(shovel.name) %></b><%= fmt_maybe_vhost(shovel.vhost) %></h1>
 
 <div class="section">
   <h2>Overview</h2>
   <div class="hider">
     <table class="facts">
-      <tr>
-        <th>Virtual host</th>
-        <td><%= fmt_string(shovel.vhost) %></td>
-      </tr>
       <tr>
         <th>Source</th>
         <td><%= fmt_string(shovel.value['src-uri']) %></td>
index 86812ef15990830d768152003e283cfe0cffc08b..3f5c846360a276562ae2bec126c8d45153c714a8 100644 (file)
@@ -65,7 +65,7 @@ filter_vhost_req(List, ReqData) ->
 %% static shovels do not have a vhost, so only allow admins (not
 %% monitors) to see them.
 filter_vhost_user(List, _ReqData, #context{user = User = #user{tags = Tags}}) ->
-    VHosts = rabbit_mgmt_util:list_login_vhosts(User),
+    VHosts = rabbit_mgmt_util:list_login_vhosts(User, undefined),
     [I || I <- List, case pget(vhost, I) of
                          undefined -> lists:member(administrator, Tags);
                          VHost     -> lists:member(VHost, VHosts)
@@ -117,8 +117,14 @@ lookup_src_dest(static, _Name) ->
     [];
 
 lookup_src_dest(dynamic, {VHost, Name}) ->
-    Def = pget(value,
-               rabbit_runtime_parameters:lookup(VHost, <<"shovel">>, Name)),
-    Ks = [<<"src-queue">>,  <<"src-exchange">>,  <<"src-exchange-key">>,
-          <<"dest-queue">>, <<"dest-exchange">>, <<"dest-exchange-key">>],
-    [{definition, [{K, V} || {K, V} <- Def, lists:member(K, Ks)]}].
+    case rabbit_runtime_parameters:lookup(VHost, <<"shovel">>, Name) of
+        %% We might not find anything if the shovel has been deleted
+        %% before we got here
+        not_found ->
+            [];
+        Props ->
+            Def = pget(value, Props),
+            Ks = [<<"src-queue">>, <<"src-exchange">>, <<"src-exchange-key">>,
+                  <<"dest-queue">>,<<"dest-exchange">>,<<"dest-exchange-key">>],
+            [{definition, [{K, V} || {K, V} <- Def, lists:member(K, Ks)]}]
+    end.
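
Reviewer note: lookup_src_dest/2 above now tolerates a shovel whose runtime parameter was deleted between listing and lookup. A minimal sketch of the same defensive pattern, using proplists:get_value/2 in place of the module's pget/2 import (the function name is illustrative):

    definition(VHost, Name, Keys) ->
        case rabbit_runtime_parameters:lookup(VHost, <<"shovel">>, Name) of
            %% The shovel may already be gone; report an empty definition
            %% rather than crashing the HTTP request.
            not_found -> [];
            Props     -> Def = proplists:get_value(value, Props),
                         [{definition, [{K, V} || {K, V} <- Def,
                                                  lists:member(K, Keys)]}]
        end.
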
index 3fb8bdd86a123d452570a8255d52307eebc00fa3..b82c4e1ff609692143b936ad30727909beb6e28a 100644 (file)
@@ -23,6 +23,6 @@ all_tests() ->
 
 tests(Module, Timeout) ->
     {foreach, fun() -> ok end,
-     [{timeout, Timeout, fun Module:F/0} ||
+     [{timeout, Timeout, fun () -> Module:F() end} ||
          {F, _Arity} <- proplists:get_value(exports, Module:module_info()),
          string:right(atom_to_list(F), 5) =:= "_test"]}.
index d0a5c1beb93a7b54726faaee5120a68a820b4d1a..b3407ce083169d8606c8c21cdf94010a27c03396 100644 (file)
@@ -65,8 +65,27 @@ shovels_test() ->
     http_delete("/users/mon", ?NO_CONTENT),
     ok.
 
+%% It's a bit arbitrary to be testing this here, but we want to be
+%% able to test that mgmt extensions can be started and stopped
+%% *somewhere*, and here is as good a place as any.
+dynamic_plugin_enable_disable_test() ->
+    http_get("/shovels", ?OK),
+    disable_plugin("rabbitmq_shovel_management"),
+    http_get("/shovels", ?NOT_FOUND),
+    http_get("/overview", ?OK),
+    disable_plugin("rabbitmq_management"),
+    http_fail("/shovels"),
+    http_fail("/overview"),
+    enable_plugin("rabbitmq_management"),
+    http_get("/shovels", ?NOT_FOUND),
+    http_get("/overview", ?OK),
+    enable_plugin("rabbitmq_shovel_management"),
+    http_get("/shovels", ?OK),
+    http_get("/overview", ?OK),
+    passed.
+
 %%---------------------------------------------------------------------------
-%% TODO this is all copypasta from the mgmt tests
+%% TODO this is mostly copypasta from the mgmt tests
 
 http_get(Path) ->
     http_get(Path, ?OK).
@@ -80,6 +99,9 @@ http_get(Path, User, Pass, CodeExp) ->
     assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
     decode(CodeExp, Headers, ResBody).
 
+http_fail(Path) ->
+    {error, {failed_connect, _}} = req(get, Path, []).
+
 http_put(Path, List, CodeExp) ->
     http_put_raw(Path, format_for_upload(List), CodeExp).
 
@@ -179,3 +201,17 @@ test_item(Exp, Act) ->
 test_item0(Exp, Act) ->
     [{did_not_find, ExpI, in, Act} || ExpI <- Exp,
                                       not lists:member(ExpI, Act)].
+%%---------------------------------------------------------------------------
+
+enable_plugin(Plugin) ->
+    plugins_action(enable, [Plugin], []).
+
+disable_plugin(Plugin) ->
+    plugins_action(disable, [Plugin], []).
+
+plugins_action(Command, Args, Opts) ->
+    PluginsFile = os:getenv("RABBITMQ_ENABLED_PLUGINS_FILE"),
+    PluginsDir = os:getenv("RABBITMQ_PLUGINS_DIR"),
+    Node = node(),
+    rpc:call(Node, rabbit_plugins_main, action,
+             [Command, Node, Args, Opts, PluginsFile, PluginsDir]).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-shovel/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-shovel/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index b0f0aaeb7f95b3e0b232eade4f7f7173f449efdf..5168c8f4b8675748de3315fae5ae2741d24d2631 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -record(endpoint,
index e416787531e9f85a6db746acec4d00e927be2fab..c945321ad4822c0f44559337e1df6b58435cc1e0 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel).
index 8d6bc5649f41bd3eef71c8341503df35b8332a03..a20b73bf1469d2595548d03019ca371d736cfef8 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_config).
 -include("rabbit_shovel.hrl").
 
 -define(IGNORE_FIELDS, [delete_after]).
+-define(EXTRA_KEYS, [add_forward_headers]).
 
 parse(ShovelName, Config) ->
     {ok, Defaults} = application:get_env(defaults),
     try
-        {ok, run_state_monad(
-               [fun enrich_shovel_config/1,
-                fun parse_shovel_config_proplist/1,
-                fun parse_shovel_config_dict/1],
-               {Config, Defaults})}
+        {ok, parse_shovel_config_dict(
+               ShovelName, parse_shovel_config_proplist(
+                             enrich_shovel_config(Config, Defaults)))}
     catch throw:{error, Reason} ->
             {error, {invalid_shovel_configuration, ShovelName, Reason}}
     end.
@@ -44,7 +43,7 @@ ensure_defaults(ShovelConfig, ParsedShovel) ->
                    {reconnect_delay,
                     ParsedShovel#shovel.reconnect_delay}).
 
-enrich_shovel_config({Config, Defaults}) ->
+enrich_shovel_config(Config, Defaults) ->
     Config1 = proplists:unfold(Config),
     case [E || E <- Config1, not (is_tuple(E) andalso tuple_size(E) == 2)] of
         []      -> case duplicate_keys(Config1) of
@@ -57,7 +56,7 @@ enrich_shovel_config({Config, Defaults}) ->
 parse_shovel_config_proplist(Config) ->
     Dict = dict:from_list(Config),
     Fields = record_info(fields, shovel) -- ?IGNORE_FIELDS,
-    Keys = dict:fetch_keys(Dict),
+    Keys = dict:fetch_keys(Dict) -- ?EXTRA_KEYS,
     case {Keys -- Fields, Fields -- Keys} of
         {[], []}      -> {_Pos, Dict1} =
                              lists:foldl(
@@ -72,24 +71,29 @@ parse_shovel_config_proplist(Config) ->
         {Unknown, _}  -> fail({unrecognised_parameters, Unknown})
     end.
 
-parse_shovel_config_dict(Dict) ->
-    run_state_monad(
-      [fun (Shovel) -> {ok, Value} = dict:find(Key, Dict),
-                       try {ParsedValue, Pos} = Fun(Value),
-                           return(setelement(Pos, Shovel, ParsedValue))
-                       catch throw:{error, Reason} ->
-                               fail({invalid_parameter_value, Key, Reason})
-                       end
-       end || {Fun, Key} <-
-                  [{fun parse_endpoint/1,             sources},
-                   {fun parse_endpoint/1,             destinations},
-                   {fun parse_non_negative_integer/1, prefetch_count},
-                   {fun parse_ack_mode/1,             ack_mode},
-                   {fun parse_binary/1,               queue},
-                   make_parse_publish(publish_fields),
-                   make_parse_publish(publish_properties),
-                   {fun parse_non_negative_number/1,  reconnect_delay}]],
-      #shovel{}).
+parse_shovel_config_dict(Name, Dict) ->
+    Cfg = run_state_monad(
+            [fun (Shovel) ->
+                     {ok, Value} = dict:find(Key, Dict),
+                     try {ParsedValue, Pos} = Fun(Value),
+                          return(setelement(Pos, Shovel, ParsedValue))
+                     catch throw:{error, Reason} ->
+                             fail({invalid_parameter_value, Key, Reason})
+                     end
+             end || {Fun, Key} <-
+                        [{fun parse_endpoint/1,             sources},
+                         {fun parse_endpoint/1,             destinations},
+                         {fun parse_non_negative_integer/1, prefetch_count},
+                         {fun parse_ack_mode/1,             ack_mode},
+                         {fun parse_binary/1,               queue},
+                         make_parse_publish(publish_fields),
+                         make_parse_publish(publish_properties),
+                         {fun parse_non_negative_number/1,  reconnect_delay}]],
+            #shovel{}),
+    case dict:find(add_forward_headers, Dict) of
+        {ok, true} -> add_forward_headers_fun(Name, Cfg);
+        _          -> Cfg
+    end.
 
 %% --=: Plain state monad implementation start :=--
 run_state_monad(FunList, State) ->
@@ -240,3 +244,14 @@ duplicate_keys(PropList) ->
     proplists:get_keys(
       lists:foldl(fun (K, L) -> lists:keydelete(K, 1, L) end, PropList,
                   proplists:get_keys(PropList))).
+
+add_forward_headers_fun(Name, #shovel{publish_properties = PubProps} = Cfg) ->
+    PubProps2 =
+        fun(SrcUri, DestUri, Props) ->
+                rabbit_shovel_util:update_headers(
+                  [{<<"shovelled-by">>, rabbit_nodes:cluster_name()},
+                   {<<"shovel-type">>,  <<"static">>},
+                   {<<"shovel-name">>,  list_to_binary(atom_to_list(Name))}],
+                  [], SrcUri, DestUri, PubProps(SrcUri, DestUri, Props))
+        end,
+    Cfg#shovel{publish_properties = PubProps2}.
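
Reviewer note: parse/2 above now recognises add_forward_headers for static shovels and wraps publish_properties so that an x-shovelled header is prepended to forwarded messages. A hedged, abbreviated example of a static shovel stanza using the new key (URIs and names are placeholders; omitted keys fall back to the configured defaults):

    {shovels,
     [{my_static_shovel,
       [{sources,             [{broker, "amqp://localhost"}]},
        {destinations,        [{broker, "amqp://remote-host"}]},
        {queue,               <<"src-queue">>},
        {publish_properties,  [{delivery_mode, 2}]},
        {add_forward_headers, true}]}]}
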
index d5d5268e10105f98e4d24a2eb6a77c8783936da5..0e5991df4c336ec69a47ffdeaf5f3a801f6d695e 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_dyn_worker_sup).
index 9be7fd80e0121468d8ea68f001d4b09ed9e19997..38bbd50a3c19fc092e36cd72056fab97aab1ecd2 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_dyn_worker_sup_sup).
index dfcb3497de41a1ffabf7d42a90a73086f368954a..49bbcac748e1db5d160428d50d9ecc58da66035b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_parameters).
 -include("rabbit_shovel.hrl").
 
 -export([validate/5, notify/4, notify_clear/3]).
--export([register/0, parse/2]).
+-export([register/0, unregister/0, parse/2]).
 
 -import(rabbit_misc, [pget/2, pget/3]).
 
--define(ROUTING_HEADER, <<"x-shovelled">>).
-
 -rabbit_boot_step({?MODULE,
                    [{description, "shovel parameters"},
                     {mfa, {rabbit_shovel_parameters, register, []}},
+                    {cleanup, {?MODULE, unregister, []}},
                     {requires, rabbit_registry},
                     {enables, recovery}]}).
 
 register() ->
     rabbit_registry:register(runtime_parameter, <<"shovel">>, ?MODULE).
 
+unregister() ->
+    rabbit_registry:unregister(runtime_parameter, <<"shovel">>).
+
 validate(_VHost, <<"shovel">>, Name, Def, User) ->
     [case pget2(<<"src-exchange">>, <<"src-queue">>, Def) of
          zero -> {error, "Must specify 'src-exchange' or 'src-queue'", []};
@@ -84,6 +86,7 @@ validation(User) ->
      {<<"prefetch-count">>,  fun rabbit_parameter_validation:number/2,optional},
      {<<"reconnect-delay">>, fun rabbit_parameter_validation:number/2,optional},
      {<<"add-forward-headers">>, fun rabbit_parameter_validation:boolean/2,optional},
+     {<<"publish-properties">>, fun validate_properties/2,  optional},
      {<<"ack-mode">>,        rabbit_parameter_validation:enum(
                                ['no-ack', 'on-publish', 'on-confirm']), optional},
      {<<"delete-after">>,    fun validate_delete_after/2, optional}
@@ -114,11 +117,12 @@ validate_uri(Name, Term, User) ->
 validate_params_user(#amqp_params_direct{}, none) ->
     ok;
 validate_params_user(#amqp_params_direct{virtual_host = VHost},
-                     User = #user{username     = Username,
-                                  auth_backend = M}) ->
-    case rabbit_vhost:exists(VHost) andalso M:check_vhost_access(User, VHost) of
-        true  -> ok;
-        false -> {error, "user \"~s\" may not connect to vhost \"~s\"",
+                     User = #user{username = Username}) ->
+    case rabbit_vhost:exists(VHost) andalso
+        (catch rabbit_access_control:check_vhost_access(
+                 User, VHost, undefined)) of
+        ok -> ok;
+        _  -> {error, "user \"~s\" may not connect to vhost \"~s\"",
                   [Username, VHost]}
     end;
 validate_params_user(#amqp_params_network{}, _User) ->
@@ -131,6 +135,25 @@ validate_delete_after(Name,  Term) ->
     {error, "~s should be number, \"never\" or \"queue-length\", actually was "
      "~p", [Name, Term]}.
 
+%% TODO headers?
+validate_properties(Name, Term) ->
+    Str = fun rabbit_parameter_validation:binary/2,
+    Num = fun rabbit_parameter_validation:number/2,
+    rabbit_parameter_validation:proplist(
+      Name, [{<<"content_type">>,     Str, optional},
+             {<<"content_encoding">>, Str, optional},
+             {<<"delivery_mode">>,    Num, optional},
+             {<<"priority">>,         Num, optional},
+             {<<"correlation_id">>,   Str, optional},
+             {<<"reply_to">>,         Str, optional},
+             {<<"expiration">>,       Str, optional},
+             {<<"message_id">>,       Str, optional},
+             {<<"timestamp">>,        Num, optional},
+             {<<"type">>,             Str, optional},
+             {<<"user_id">>,          Str, optional},
+             {<<"app_id">>,           Str, optional},
+             {<<"cluster_id">>,       Str, optional}], Term).
+
 %%----------------------------------------------------------------------------
 
 parse({VHost, Name}, Def) ->
@@ -182,14 +205,17 @@ parse({VHost, Name}, Def) ->
              end,
     AddHeaders = pget(<<"add-forward-headers">>, Def, false),
     Table0 = [{<<"shovelled-by">>, rabbit_nodes:cluster_name()},
+              {<<"shovel-type">>,  <<"dynamic">>},
               {<<"shovel-name">>,  Name},
               {<<"shovel-vhost">>, VHost}],
-    PubPropsFun = fun (SrcURI, DestURI, P = #'P_basic'{headers = H}) ->
+    SetProps = lookup_indices(pget(<<"publish-properties">>, Def, []),
+                              record_info(fields, 'P_basic')),
+    PubPropsFun = fun (SrcURI, DestURI, P0) ->
+                          P = set_properties(P0, SetProps),
                           case AddHeaders of
-                              true  -> H1 = update_headers(
-                                              Table0, Table1 ++ Table2,
-                                              SrcURI, DestURI, H),
-                                       P#'P_basic'{headers = H1};
+                              true  -> rabbit_shovel_util:update_headers(
+                                         Table0, Table1 ++ Table2,
+                                         SrcURI, DestURI, P);
                               false -> P
                           end
                   end,
@@ -234,12 +260,19 @@ ensure_queue(Conn, Queue) ->
         catch amqp_channel:close(Ch)
     end.
 
-update_headers(Table0, Table1, SrcURI, DestURI, Headers) ->
-    Table = Table0 ++ [{<<"src-uri">>,  SrcURI},
-                       {<<"dest-uri">>, DestURI}] ++ Table1,
-    rabbit_basic:prepend_table_header(
-      ?ROUTING_HEADER, [{K, longstr, V} || {K, V} <- Table],
-      Headers).
-
 opt_b2a(B) when is_binary(B) -> list_to_atom(binary_to_list(B));
 opt_b2a(N)                   -> N.
+
+set_properties(Props, []) ->
+    Props;
+set_properties(Props, [{Ix, V} | Rest]) ->
+    set_properties(setelement(Ix, Props, V), Rest).
+
+lookup_indices(KVs, L) ->
+    [{1 + list_find(list_to_atom(binary_to_list(K)), L), V} || {K, V} <- KVs].
+
+list_find(K, L) -> list_find(K, L, 1).
+
+list_find(K, [K|_], N) -> N;
+list_find(K, [],   _N) -> exit({not_found, K});
+list_find(K, [_|L], N) -> list_find(K, L, N + 1).
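
Reviewer note: the parameters module above now validates publish-properties and applies them to outgoing messages via lookup_indices/2 and set_properties/2. A hedged example of a dynamic shovel definition exercising the new key (URIs are placeholders; the property values mirror the validator and the set_properties_test further down):

    Def = [{<<"src-uri">>,            <<"amqp://localhost">>},
           {<<"dest-uri">>,           <<"amqp://remote-host">>},
           {<<"src-queue">>,          <<"src">>},
           {<<"dest-queue">>,         <<"dest">>},
           {<<"publish-properties">>, [{<<"cluster_id">>,    <<"x">>},
                                       {<<"delivery_mode">>, 2}]}].
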
index f30dbc4d847bef4e4d7b564aa1ccd28d0624e111..37d738ec020da27623981f2b2a8fddd0554b55c7 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_status).
index 488cef2e5d555de43f4db342bb469331363d20cb..b0bce9db22cb0640847b20d7bcf81c307a9b97a7 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_sup).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_util.erl b/rabbitmq-server/plugins-src/rabbitmq-shovel/src/rabbit_shovel_util.erl
new file mode 100644 (file)
index 0000000..a3b0f9c
--- /dev/null
@@ -0,0 +1,32 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_shovel_util).
+
+-export([update_headers/5]).
+
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-define(ROUTING_HEADER, <<"x-shovelled">>).
+
+update_headers(Prefix, Suffix, SrcURI, DestURI,
+               Props = #'P_basic'{headers = Headers}) ->
+    Table = Prefix ++ [{<<"src-uri">>,  SrcURI},
+                       {<<"dest-uri">>, DestURI}] ++ Suffix,
+    Headers2 = rabbit_basic:prepend_table_header(
+                 ?ROUTING_HEADER, [{K, longstr, V} || {K, V} <- Table],
+                 Headers),
+    Props#'P_basic'{headers = Headers2}.
index 38940d3444daa4a19fa30415a7e9403f908f0885..e5a8f638597eb2ad82d52b52877d3a12b6ad535d 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_worker).
@@ -88,6 +88,10 @@ handle_cast(init, State = #state{config = Config}) ->
     end,
 
     Remaining = remaining(InboundChan, Config),
+    case Remaining of
+        0 -> exit({shutdown, autodelete});
+        _ -> ok
+    end,
 
     #'basic.consume_ok'{} =
         amqp_channel:subscribe(
@@ -153,12 +157,16 @@ terminate(Reason, #state{inbound_conn = undefined, inbound_ch = undefined,
                          name = Name, type = Type}) ->
     rabbit_shovel_status:report(Name, Type, {terminated, Reason}),
     ok;
+terminate({shutdown, autodelete}, State = #state{name = {VHost, Name},
+                                                 type = dynamic}) ->
+    close_connections(State),
+    %% See rabbit_shovel_dyn_worker_sup_sup:stop_child/1
+    put(shovel_worker_autodelete, true),
+    rabbit_runtime_parameters:clear(VHost, <<"shovel">>, Name),
+    rabbit_shovel_status:remove({VHost, Name}),
+    ok;
 terminate(Reason, State) ->
-    maybe_autodelete(Reason, State),
-    catch amqp_connection:close(State#state.inbound_conn,
-                                ?MAX_CONNECTION_CLOSE_TIMEOUT),
-    catch amqp_connection:close(State#state.outbound_conn,
-                                ?MAX_CONNECTION_CLOSE_TIMEOUT),
+    close_connections(State),
     rabbit_shovel_status:report(State#state.name, State#state.type,
                                 {terminated, Reason}),
     ok.
@@ -228,14 +236,14 @@ make_conn_and_chan(URIs) ->
     {ok, Chan} = amqp_connection:open_channel(Conn),
     {Conn, Chan, list_to_binary(amqp_uri:remove_credentials(URI))}.
 
-remaining(Ch, #shovel{delete_after = never}) ->
+remaining(_Ch, #shovel{delete_after = never}) ->
     unlimited;
 remaining(Ch, #shovel{delete_after = 'queue-length', queue = Queue}) ->
     #'queue.declare_ok'{message_count = N} =
         amqp_channel:call(Ch, #'queue.declare'{queue   = Queue,
                                                passive = true}),
     N;
-remaining(Ch, #shovel{delete_after = Count}) ->
+remaining(_Ch, #shovel{delete_after = Count}) ->
     Count.
 
 decr_remaining(_N, State = #state{remaining = unlimited}) ->
@@ -253,10 +261,8 @@ decr_remaining_unacked(State = #state{remaining_unacked = 0}) ->
 decr_remaining_unacked(State = #state{remaining_unacked = N}) ->
     State#state{remaining_unacked = N - 1}.
 
-maybe_autodelete({shutdown, autodelete}, #state{name = {VHost, Name},
-                                                type = dynamic}) ->
-    %% See rabbit_shovel_dyn_worker_sup_sup:stop_child/1
-    put(shovel_worker_autodelete, true),
-    rabbit_runtime_parameters:clear(VHost, <<"shovel">>, Name);
-maybe_autodelete(_Reason, _State) ->
-    ok.
+close_connections(State) ->
+    catch amqp_connection:close(State#state.inbound_conn,
+                                ?MAX_CONNECTION_CLOSE_TIMEOUT),
+    catch amqp_connection:close(State#state.outbound_conn,
+                                ?MAX_CONNECTION_CLOSE_TIMEOUT).
index 3528f9bf2892ce6bddf96108ce4c9b0d508405d5..1705d5f0f0e2d91a28ea892fb8de8a3dd4696560 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_worker_sup).
index efb62a789eed5928a89c46a5e3a9f429db5033c3..6619112a7e07e6eb315ecb34a1c88c77df0e04a5 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_test).
@@ -153,7 +153,8 @@ main_test() ->
          {publish_fields, [{exchange, ?EXCHANGE}, {routing_key, ?FROM_SHOVEL}]},
          {publish_properties, [{delivery_mode, 2},
                                {cluster_id,    <<"my-cluster">>},
-                               {content_type,  ?SHOVELLED}]}
+                               {content_type,  ?SHOVELLED}]},
+         {add_forward_headers, true}
         ]}],
       infinity),
 
@@ -196,7 +197,9 @@ main_test() ->
                             routing_key = ?FROM_SHOVEL },
          #amqp_msg { payload = <<42>>,
                      props   = #'P_basic' { delivery_mode = 2,
-                                            content_type  = ?SHOVELLED }
+                                            content_type  = ?SHOVELLED,
+                                            headers       = [{<<"x-shovelled">>,
+                                                              _, _}]}
                    }} ->
             ok = amqp_channel:call(Chan, #'basic.ack'{ delivery_tag = AckTag })
     after ?TIMEOUT -> throw(timeout_waiting_for_deliver1)
index 81b568ae94a924f171f106aec70bfbac2402f45b..2269ea85629a820e5aecdabb98531a37471f9161 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_test_all).
@@ -24,7 +24,7 @@ all_tests() ->
 
 tests(Module, Timeout) ->
     {foreach, fun() -> ok end,
-     [{timeout, Timeout, fun Module:F/0} || F <- funs(Module, "_test")] ++
+     [{timeout, Timeout, fun () -> Module:F() end} || F <- funs(Module, "_test")] ++
          [{timeout, Timeout, Fun} || Gen <- funs(Module, "_test_"),
                                      Fun <- Module:Gen()]}.
 
index 5a8ead7c3fde17699bbfec7538e2d59edb7eb1b6..b3c74d8901a2034afe103c970425070c39792fc1 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_shovel_test_dyn).
@@ -29,6 +29,18 @@ simple_test() ->
               publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>)
       end).
 
+set_properties_test() ->
+    with_ch(
+      fun (Ch) ->
+              Ps = [{<<"src-queue">>,      <<"src">>},
+                    {<<"dest-queue">>,     <<"dest">>},
+                    {<<"publish-properties">>, [{<<"cluster_id">>, <<"x">>}]}],
+              set_param(<<"test">>, Ps),
+              #amqp_msg{props = #'P_basic'{cluster_id = Cluster}} =
+                  publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi">>),
+              ?assertEqual(<<"x">>, Cluster)
+      end).
+
 exchange_test() ->
     with_ch(
       fun (Ch) ->
@@ -141,6 +153,11 @@ validation_test() ->
     invalid_param([{<<"ack-mode">>,        <<"whenever">>} | QURIs]),
     invalid_param([{<<"delete-after">>,    <<"whenever">>} | QURIs]),
 
+    %% Check properties have to look property-ish
+    invalid_param([{<<"publish-properties">>, [{<<"nonexistent">>, <<>>}]}]),
+    invalid_param([{<<"publish-properties">>, [{<<"cluster_id">>, 2}]}]),
+    invalid_param([{<<"publish-properties">>, <<"something">>}]),
+
     %% Can't use explicit message count and no-ack together
     invalid_param([{<<"delete-after">>,    1},
                    {<<"ack-mode">>,        <<"no-ack">>} | QURIs]),
@@ -197,13 +214,14 @@ expect(Ch, Q, Payload) ->
     receive
         #'basic.consume_ok'{consumer_tag = CTag} -> ok
     end,
-    receive
-        {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
-            ok
-    after 1000 ->
-            exit({not_received, Payload})
-    end,
-    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}).
+    Msg = receive
+              {#'basic.deliver'{}, #amqp_msg{payload = Payload} = M} ->
+                  M
+          after 1000 ->
+                  exit({not_received, Payload})
+          end,
+    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+    Msg.
 
 expect_empty(Ch, Q) ->
     ?assertMatch(#'basic.get_empty'{},
@@ -239,7 +257,7 @@ invalid_param(Value) -> invalid_param(Value, none).
 valid_param(Value) -> valid_param(Value, none).
 
 lookup_user(Name) ->
-    {ok, User} = rabbit_auth_backend_internal:check_user_login(Name, []),
+    {ok, User} = rabbit_access_control:check_user_login(Name, []),
     User.
 
 clear_param(Name) ->
diff --git a/rabbitmq-server/plugins-src/rabbitmq-stomp/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-stomp/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
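
The "How to Contribute" steps in the new CONTRIBUTING.md above reduce to a short shell session. A minimal sketch, assuming the plugin checkout ends up in a `rabbitmq-stomp` directory inside the umbrella and using a placeholder fork remote and branch name; only the umbrella URL and `make co` are taken from the file itself:

    # Clone the umbrella and check out all member repositories
    git clone https://github.com/rabbitmq/rabbitmq-public-umbrella umbrella
    cd umbrella
    make co
    # Work on the plugin being changed (fork URL and branch name are placeholders)
    cd rabbitmq-stomp
    git remote add fork git@github.com:$YOUR_GITHUB_USER/rabbitmq-stomp.git
    git checkout -b my-descriptive-branch
    # ... edit, run the tests, commit with a descriptive message ...
    git push fork my-descriptive-branch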
index df7b89066e10fae0bed50cb8302cb0975a86a8aa..782d56331adf662775d0f270c32a73b890ae8532 100644 (file)
@@ -5,9 +5,10 @@ it, use <href="http://www.rabbitmq.com/man/rabbitmq-plugins.1.man.html">rabbitmq
 
     rabbitmq-plugins enable rabbitmq_stomp
 
-Binaries for previous versions of the STOMP adapter can be obtained
-from
-<http://www.rabbitmq.com/plugins.html#rabbitmq-stomp>.
+## Supported STOMP Versions
 
-Full usage instructions can be found at
-<http://www.rabbitmq.com/stomp.html>.
+1.0 through 1.2.
+
+## Documentation
+
+[RabbitMQ STOMP plugin documentation](http://www.rabbitmq.com/stomp.html).
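
The rewritten README above assumes the STOMP plugin ships with the broker and only needs to be switched on. A minimal sketch of doing that on a node; the enable command is the one quoted in the README's context line, and the follow-up check is an assumption about the standard rabbitmq-plugins CLI:

    rabbitmq-plugins enable rabbitmq_stomp
    rabbitmq-plugins list    # rabbitmq_stomp should now appear as enabled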
index a937fb5a396cc63c342993ae4cfda3566d1a5049..40f5bd1db747b3af2d047347d0e824b0dd3200cd 100644 (file)
@@ -1,8 +1,8 @@
-UPSTREAM_HG=https://stomppy.googlecode.com/hg/
-REVISION=16a4000624a7
+UPSTREAM_GIT=https://github.com/jasonrbriggs/stomp.py.git
+REVISION=v4.0.16
 
 LIB_DIR=stomppy
-CHECKOUT_DIR=stomppy-hg
+CHECKOUT_DIR=stomppy-git
 
 TARGETS=$(LIB_DIR)
 
@@ -14,14 +14,13 @@ clean:
 distclean: clean
        rm -rf $(CHECKOUT_DIR)
 
-$(LIB_DIR) : $(CHECKOUT_DIR) rabbit.patch
+$(LIB_DIR) : $(CHECKOUT_DIR)
        rm -rf $@
        cp -R $< $@
-       cd $@ && patch -p1 < ../rabbit.patch
 
 $(CHECKOUT_DIR):
-       hg clone $(UPSTREAM_HG) $@
-       (cd $@ && hg up $(REVISION)) || rm -rf $@
+       git clone $(UPSTREAM_GIT) $@
+       (cd $@ && git checkout $(REVISION)) || rm -rf $@
 
 echo-revision:
        @echo $(REVISION)
diff --git a/rabbitmq-server/plugins-src/rabbitmq-stomp/deps/stomppy/rabbit.patch b/rabbitmq-server/plugins-src/rabbitmq-stomp/deps/stomppy/rabbit.patch
deleted file mode 100644 (file)
index ceebd16..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-diff -r 16a4000624a7 stomp/connect.py
---- a/stomp/connect.py Sun May 02 18:15:34 2010 +0100
-+++ b/stomp/connect.py Fri Aug 26 15:35:33 2011 +0100
-@@ -88,7 +88,10 @@
-                  ssl_key_file = None,
-                  ssl_cert_file = None,
-                  ssl_ca_certs = None,
--                 ssl_cert_validator = None):
-+                 ssl_cert_validator = None,
-+                 version = None,
-+                 heartbeat = None,
-+                 virtual_host = None):
-         """
-         Initialize and start this connection.
-@@ -159,6 +162,16 @@
-             where OK is a boolean, and cert is a certificate structure
-             as returned by ssl.SSLSocket.getpeercert()
-+
-+        \param version
-+            (optional) stomp version header to send (comma separated)
-+
-+        \param heartbeat
-+            (optional) heartbeat header to send (STOMP 1.1)
-+
-+        \param virtual_host
-+            (optional) virtual_host header to send (STOMP 1.1)
-+
-         """
-         sorted_host_and_ports = []
-@@ -205,6 +218,15 @@
-             self.__connect_headers['login'] = user
-             self.__connect_headers['passcode'] = passcode
-+        if version is not None:
-+            self.__connect_headers['accept-version'] = version
-+
-+        if heartbeat is not None:
-+            self.__connect_headers['heart-beat'] = heartbeat
-+
-+        if virtual_host is not None:
-+            self.__connect_headers['host'] = virtual_host
-+
-         self.__socket = None
-         self.__socket_semaphore = threading.BoundedSemaphore(1)
-         self.__current_host_and_port = None
-@@ -383,6 +405,10 @@
-         """
-         self.__send_frame_helper('DISCONNECT', '', utils.merge_headers([self.__connect_headers, headers, keyword_headers]), [ ])
-         self.__running = False
-+        self.close_socket()
-+        self.__current_host_and_port = None
-+        
-+    def close_socket(self):
-         if self.__socket is not None:
-             if self.__ssl:
-                 #
-@@ -390,20 +416,23 @@
-                 #
-                 try:
-                     self.__socket = self.__socket.unwrap()
--                except Exception:
-+                except Exception as e:
-                     #
-                     # unwrap seems flaky on Win with the backported ssl mod, so catch any exception and log it
-                     #
--                    _, e, _ = sys.exc_info()
--                    log.warn(e)
-+                    log.warning("socket unwrap() threw exception: %s" % e)
-             elif hasattr(socket, 'SHUT_RDWR'):
--                self.__socket.shutdown(socket.SHUT_RDWR)
-+                try:
-+                    self.__socket.shutdown(socket.SHUT_RDWR)
-+                except Exception as e:
-+                    log.warning("socket shutdown() threw exception: %s" % e)
-         #
--        # split this into a separate check, because sometimes the socket is nulled between shutdown and this call
-+        # caution, because sometimes the socket is nulled between shutdown and this call
-         #
--        if self.__socket is not None:
-+        try:
-             self.__socket.close()
--        self.__current_host_and_port = None
-+        except Exception as e:
-+            log.warning("socket close() threw exception: %s" % e)
-     def __convert_dict(self, payload):
-         """
-@@ -449,6 +478,9 @@
-                 raise KeyError("Command %s requires header %r" % (command, required_header_key))
-         self.__send_frame(command, headers, payload)
-+    def send_frame(self, command, headers={}, payload=''):
-+        self.__send_frame(command, headers, payload)
-+
-     def __send_frame(self, command, headers={}, payload=''):
-         """
-         Send a STOMP frame.
-@@ -680,4 +712,4 @@
-                     sleep_exp += 1
-         if not self.__socket:
--            raise exception.ReconnectFailedException
-\ No newline at end of file
-+            raise exception.ReconnectFailedException
index 62504a0ee8b15c493a34f10b781a311052e999f0..d1497f4dd0d98f1b7f35912ef581442dfdf9a35b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -record(stomp_configuration, {default_login,
index 77a946c5cbb6911390df33a21e76fcd740772c1d..77d5810c164db55ff404f76b44bf16db05954874 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -record(stomp_frame, {command, headers, body_iolist}).
index c7ab43ca4345dd1b1d6b7508c0194d627b554bd4..398ce4216973abc50604c249ef983a6cf2c80f09 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -define(HEADER_ACCEPT_VERSION, "accept-version").
index bddbfde6c7bd6e9a33d0da9fbaa7641c08dd8395..67cb2c83cc8122cff3c1f8128daadd7159652636 100644 (file)
@@ -1,48 +1,24 @@
 RELEASABLE:=true
-DEPS:=rabbitmq-server rabbitmq-erlang-client
-STANDALONE_TEST_COMMANDS:=eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose])
-WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/test.py $(PACKAGE_DIR)/test/src/test_connect_options.py
-WITH_BROKER_TEST_COMMANDS:=rabbit_stomp_test:all_tests() rabbit_stomp_amqqueue_test:all_tests()
-
-RABBITMQ_TEST_PATH=$(PACKAGE_DIR)/../rabbitmq-test
-ABS_PACKAGE_DIR:=$(abspath $(PACKAGE_DIR))
-
-CERTS_DIR:=$(ABS_PACKAGE_DIR)/test/certs
-CAN_RUN_SSL:=$(shell if [ -d $(RABBITMQ_TEST_PATH) ]; then echo "true"; else echo "false"; fi)
-
-TEST_CONFIG_PATH=$(TEST_EBIN_DIR)/test.config
-WITH_BROKER_TEST_CONFIG:=$(TEST_EBIN_DIR)/test
-
-.PHONY: $(TEST_CONFIG_PATH)
-
-ifeq ($(CAN_RUN_SSL),true)
-
-WITH_BROKER_TEST_SCRIPTS += $(PACKAGE_DIR)/test/src/test_ssl.py
-
-$(TEST_CONFIG_PATH): $(CERTS_DIR) $(ABS_PACKAGE_DIR)/test/src/ssl.config
-       sed -e "s|%%CERTS_DIR%%|$(CERTS_DIR)|g" < $(ABS_PACKAGE_DIR)/test/src/ssl.config > $@
-       @echo "\nRunning SSL tests\n"
-
-$(CERTS_DIR):
-       mkdir -p $(CERTS_DIR)
-       make -C $(RABBITMQ_TEST_PATH)/certs all PASSWORD=test DIR=$(CERTS_DIR)
-
-else
-$(TEST_CONFIG_PATH): $(ABS_PACKAGE_DIR)/test/src/non_ssl.config
-       cp $(ABS_PACKAGE_DIR)/test/src/non_ssl.config $@
-       @echo "\nNOT running SSL tests - looked in $(RABBITMQ_TEST_PATH) \n"
-
-endif
+DEPS:=rabbitmq-server rabbitmq-erlang-client rabbitmq-test
+#STANDALONE_TEST_COMMANDS:=eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose])
+WITH_BROKER_TEST_SCRIPTS:=$(PACKAGE_DIR)/test/src/test.py $(PACKAGE_DIR)/test/src/test_connect_options.py $(PACKAGE_DIR)/test/src/test_ssl.py
+#WITH_BROKER_TEST_COMMANDS:=rabbit_stomp_test:all_tests() rabbit_stomp_amqqueue_test:all_tests()
+WITH_BROKER_TEST_CONFIG:=$(PACKAGE_DIR)/test/ebin/test
 
 define package_rules
 
-$(PACKAGE_DIR)+pre-test:: $(TEST_CONFIG_PATH)
-       make -C $(PACKAGE_DIR)/deps/stomppy
+$(PACKAGE_DIR)+pre-test::
+       rm -rf $(PACKAGE_DIR)/test/certs
+       mkdir $(PACKAGE_DIR)/test/certs
+       mkdir -p $(PACKAGE_DIR)/test/ebin
+       sed -e "s|%%CERTS_DIR%%|$(abspath $(PACKAGE_DIR))/test/certs|g" < $(PACKAGE_DIR)/test/src/test.config > $(PACKAGE_DIR)/test/ebin/test.config
+       $(MAKE) -C $(PACKAGE_DIR)/../rabbitmq-test/certs all PASSWORD=test DIR=$(abspath $(PACKAGE_DIR))/test/certs
+       $(MAKE) -C $(PACKAGE_DIR)/deps/stomppy
 
 $(PACKAGE_DIR)+clean::
-       rm -rf $(CERTS_DIR)
+       rm -rf $(PACKAGE_DIR)/test/certs
 
 $(PACKAGE_DIR)+clean-with-deps::
-       make -C $(PACKAGE_DIR)/deps/stomppy distclean
+       $(MAKE) -C $(PACKAGE_DIR)/deps/stomppy distclean
 
 endef
index bb8f7f9a129329b4d009b7d2649a8e5d65d5540b..bd867aab1a2e7b791ddea8661bdb8a09eac8d889 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp).
index d0f41b7e5459795470b2847e2e7293eb826ed093..4f293edb1c735b48d90d43f8e4adebd7b25f34d9 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_client_sup).
index 335a3a817261f698f1d1ce6c3869021f84880017..ecd636524f722a1584bda2bd7079b1773ce4c94f 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% stomp_frame implements the STOMP framing protocol "version 1.0", as
@@ -151,9 +151,11 @@ insert_header(Headers, Name, Value) ->
         false -> [{Name, Value} | Headers]
     end.
 
-parse_body(Content, Frame) ->
-    parse_body(Content, Frame, [],
-               integer_header(Frame, ?HEADER_CONTENT_LENGTH, unknown)).
+parse_body(Content, Frame = #stomp_frame{command = Command}) ->
+    case Command of
+        "SEND" -> parse_body(Content, Frame, [], integer_header(Frame, ?HEADER_CONTENT_LENGTH, unknown));
+        _ -> parse_body(Content, Frame, [], unknown)
+    end.
 
 parse_body(Content, Frame, Chunks, unknown) ->
     parse_body2(Content, Frame, Chunks, case firstnull(Content) of
@@ -193,6 +195,9 @@ boolean_header(#stomp_frame{headers = Headers}, Key) ->
     case lists:keysearch(Key, 1, Headers) of
         {value, {_, "true"}}  -> {ok, true};
         {value, {_, "false"}} -> {ok, false};
+        %% some Python clients serialize True/False as "True"/"False"
+        {value, {_, "True"}}  -> {ok, true};
+        {value, {_, "False"}} -> {ok, false};
         _                     -> not_found
     end.
 
@@ -228,7 +233,7 @@ serialize(#stomp_frame{command = Command,
          Len > 0 -> [?HEADER_CONTENT_LENGTH ++ ":", integer_to_list(Len), ?LF];
          true    -> []
      end,
-     ?LF, BodyFragments, 0].
+     ?LF, BodyFragments, 0, ?LF].
 
 serialize_header({K, V}) when is_integer(V) -> hdr(escape(K), integer_to_list(V));
 serialize_header({K, V}) when is_list(V)    -> hdr(escape(K), escape(V)).
index 7d8ce2723fcb46ae61f6082fe9eda6e84869ac64..0a6dae72ce2a9891a406605750db9127fd77bc0a 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_processor).
@@ -146,8 +146,11 @@ handle_info(#'basic.cancel_ok'{}, State) ->
 handle_info(#'basic.ack'{delivery_tag = Tag, multiple = IsMulti}, State) ->
     {noreply, flush_pending_receipts(Tag, IsMulti, State), hibernate};
 handle_info({Delivery = #'basic.deliver'{},
-             #amqp_msg{props = Props, payload = Payload}}, State) ->
-    {noreply, send_delivery(Delivery, Props, Payload, State), hibernate};
+             #amqp_msg{props = Props, payload = Payload},
+             DeliveryCtx}, State) ->
+    State1 = send_delivery(Delivery, Props, Payload, State),
+    amqp_channel:notify_received(DeliveryCtx),
+    {noreply, State1, hibernate};
 handle_info(#'basic.cancel'{consumer_tag = Ctag}, State) ->
     process_request(
       fun(StateN) -> server_cancel_consumer(Ctag, StateN) end, State);
@@ -155,9 +158,24 @@ handle_info({'EXIT', Conn,
              {shutdown, {server_initiated_close, Code, Explanation}}},
             State = #state{connection = Conn}) ->
     amqp_death(Code, Explanation, State);
+handle_info({'EXIT', Conn,
+             {shutdown, {connection_closing,
+                         {server_initiated_close, Code, Explanation}}}},
+            State = #state{connection = Conn}) ->
+    amqp_death(Code, Explanation, State);
 handle_info({'EXIT', Conn, Reason}, State = #state{connection = Conn}) ->
     send_error("AMQP connection died", "Reason: ~p", [Reason], State),
     {stop, {conn_died, Reason}, State};
+
+handle_info({'EXIT', Ch, Reason}, State = #state{channel = Ch}) ->
+    send_error("AMQP channel died", "Reason: ~p", [Reason], State),
+    {stop, {channel_died, Reason}, State};
+handle_info({'EXIT', Ch,
+             {shutdown, {server_initiated_close, Code, Explanation}}},
+            State = #state{channel = Ch}) ->
+    amqp_death(Code, Explanation, State);
+
+
 handle_info({inet_reply, _, ok}, State) ->
     {noreply, State, hibernate};
 handle_info({bump_credit, Msg}, State) ->
@@ -294,10 +312,10 @@ handle_frame("SEND", Frame, State) ->
         end);
 
 handle_frame("ACK", Frame, State) ->
-    ack_action("ACK", Frame, State, fun create_ack_method/2);
+    ack_action("ACK", Frame, State, fun create_ack_method/3);
 
 handle_frame("NACK", Frame, State) ->
-    ack_action("NACK", Frame, State, fun create_nack_method/2);
+    ack_action("NACK", Frame, State, fun create_nack_method/3);
 
 handle_frame("BEGIN", Frame, State) ->
     transactional_action(Frame, "BEGIN", fun begin_transaction/2, State);
@@ -329,7 +347,8 @@ ack_action(Command, Frame,
                 {ok, {ConsumerTag, _SessionId, DeliveryTag}} ->
                     case dict:find(ConsumerTag, Subs) of
                         {ok, Sub} ->
-                            Method = MethodFun(DeliveryTag, Sub),
+                            Requeue = rabbit_stomp_frame:boolean_header(Frame, "requeue", true),
+                            Method = MethodFun(DeliveryTag, Sub, Requeue),
                             case transactional(Frame) of
                                 {yes, Transaction} ->
                                     extend_transaction(
@@ -430,7 +449,7 @@ maybe_delete_durable_sub({topic, Name}, Frame,
                                            ?HEADER_PERSISTENT, false) of
         true ->
             {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
-            QName = rabbit_stomp_util:durable_subscription_queue(Name, Id),
+            QName = rabbit_stomp_util:subscription_queue_name(Name, Id),
             amqp_channel:call(Channel,
                               #'queue.delete'{queue  = list_to_binary(QName),
                                               nowait = false}),
@@ -507,6 +526,8 @@ do_login(Username, Passwd, VirtualHost, Heartbeat, AdapterInfo, Version,
         {ok, Connection} ->
             link(Connection),
             {ok, Channel} = amqp_connection:open_channel(Connection),
+            link(Channel),
+            amqp_channel:enable_delivery_flow_control(Channel),
             SessionId = rabbit_guid:string(rabbit_guid:gen_secure(), "session"),
             {{SendTimeout, ReceiveTimeout}, State1} =
                 ensure_heartbeats(Heartbeat, State),
@@ -656,13 +677,14 @@ do_send(Destination, _DestHdr,
             Err
     end.
 
-create_ack_method(DeliveryTag, #subscription{multi_ack = IsMulti}) ->
+create_ack_method(DeliveryTag, #subscription{multi_ack = IsMulti}, _) ->
     #'basic.ack'{delivery_tag = DeliveryTag,
                  multiple     = IsMulti}.
 
-create_nack_method(DeliveryTag, #subscription{multi_ack = IsMulti}) ->
+create_nack_method(DeliveryTag, #subscription{multi_ack = IsMulti}, Requeue) ->
     #'basic.nack'{delivery_tag = DeliveryTag,
-                  multiple     = IsMulti}.
+                  multiple     = IsMulti,
+                  requeue      = Requeue}.
 
 negotiate_version(Frame) ->
     ClientVers = re:split(rabbit_stomp_frame:header(
@@ -691,6 +713,7 @@ send_delivery(Delivery = #'basic.deliver'{consumer_tag = ConsumerTag},
                        State)
     end.
 
+
 send_method(Method, Channel, State) ->
     amqp_channel:call(Channel, Method),
     State.
@@ -960,12 +983,20 @@ ensure_endpoint(source, EndPoint, Frame, Channel, State) ->
                           {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
                           {_, Name} = rabbit_routing_util:parse_routing(EndPoint),
                           list_to_binary(
-                            rabbit_stomp_util:durable_subscription_queue(Name,
-                                                                         Id))
+                            rabbit_stomp_util:subscription_queue_name(Name,
+                                                                      Id))
                   end},
                  {durable, true}];
             false ->
-                [{durable, false}]
+                [{subscription_queue_name_gen,
+                  fun () ->
+                          Id = rabbit_guid:gen_secure(),
+                          {_, Name} = rabbit_routing_util:parse_routing(EndPoint),
+                          list_to_binary(
+                            rabbit_stomp_util:subscription_queue_name(Name,
+                                                                      Id))
+                  end},
+                 {durable, false}]
         end,
     rabbit_routing_util:ensure_endpoint(source, Channel, EndPoint, Params, State);
 
index f4b01753b01270ed9acc75c84f2aa00f9ef96f91..673afeebb76a69cedced8942bf2aee5d736d81dc 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_reader).
 
 -export([start_link/3]).
--export([init/3]).
+-export([init/3, mainloop/2]).
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
 -export([conserve_resources/3]).
 
 -include("rabbit_stomp.hrl").
@@ -25,7 +26,8 @@
 -include_lib("amqp_client/include/amqp_client.hrl").
 
 -record(reader_state, {socket, parse_state, processor, state,
-                       conserve_resources, recv_outstanding}).
+                       conserve_resources, recv_outstanding,
+                       parent}).
 
 %%----------------------------------------------------------------------------
 
@@ -48,7 +50,7 @@ go(SupHelperPid, ProcessorPid, Configuration) ->
                 {ok, ConnStr} ->
                     case SockTransform(Sock0) of
                         {ok, Sock} ->
-
+                            DebugOpts = sys:debug_options([]),
                             ProcInitArgs = processor_args(SupHelperPid,
                                                           Configuration,
                                                           Sock),
@@ -59,7 +61,7 @@ go(SupHelperPid, ProcessorPid, Configuration) ->
 
                             ParseState = rabbit_stomp_frame:initial_state(),
                             try
-                                mainloop(
+                                mainloop(DebugOpts,
                                   register_resource_alarm(
                                     #reader_state{socket             = Sock,
                                                   parse_state        = ParseState,
@@ -72,35 +74,43 @@ go(SupHelperPid, ProcessorPid, Configuration) ->
                             catch _:Ex ->
                                 log_network_error(ConnStr, Ex),
                                 rabbit_net:fast_close(Sock),
+                                rabbit_stomp_processor:flush_and_die(ProcessorPid),
                                 exit(normal)
                             end,
                             done;
                         {error, enotconn} ->
                             rabbit_net:fast_close(Sock0),
+                            rabbit_stomp_processor:flush_and_die(ProcessorPid),
                             exit(normal);
                         {error, Reason} ->
                             log_network_error(ConnStr, Reason),
                             rabbit_net:fast_close(Sock0),
+                            rabbit_stomp_processor:flush_and_die(ProcessorPid),
                             exit(normal)
                         end
             end
     end.
 
-mainloop(State0 = #reader_state{socket = Sock}) ->
+mainloop(DebugOpts, State0 = #reader_state{socket = Sock}) ->
     State = run_socket(control_throttle(State0)),
     receive
         {inet_async, Sock, _Ref, {ok, Data}} ->
-            mainloop(process_received_bytes(
+            mainloop(DebugOpts, process_received_bytes(
                        Data, State#reader_state{recv_outstanding = false}));
         {inet_async, _Sock, _Ref, {error, closed}} ->
             ok;
         {inet_async, _Sock, _Ref, {error, Reason}} ->
             throw({inet_error, Reason});
+        {inet_reply, _Sock, {error, closed}} ->
+            ok;
         {conserve_resources, Conserve} ->
-            mainloop(State#reader_state{conserve_resources = Conserve});
+            mainloop(DebugOpts, State#reader_state{conserve_resources = Conserve});
         {bump_credit, Msg} ->
             credit_flow:handle_bump_msg(Msg),
-            mainloop(State);
+            mainloop(DebugOpts, State);
+        {system, From, Request} ->
+            sys:handle_system_msg(Request, From, State#reader_state.parent,
+                                 ?MODULE, DebugOpts, State);
         {'EXIT', _From, shutdown} ->
             ok;
         Other ->
@@ -158,6 +168,17 @@ run_socket(State = #reader_state{socket = Sock}) ->
 
 %%----------------------------------------------------------------------------
 
+system_continue(Parent, DebugOpts, State) ->
+    mainloop(DebugOpts, State#reader_state{parent = Parent}).
+
+system_terminate(Reason, _Parent, _OldVsn, _Extra) ->
+    exit(Reason).
+
+system_code_change(Misc, _Module, _OldSvn, _Extra) ->
+    {ok, Misc}.
+
+%%----------------------------------------------------------------------------
+
 processor_args(SupPid, Configuration, Sock) ->
     SendFun = fun (sync, IoData) ->
                       %% no messages emitted
index 01fa5959b483cfef6cf56667fa599b3df8dd35de..d376e5e81e234d50ee538a46659cf569ee93dfe5 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_sup).
@@ -25,14 +25,17 @@ start_link(Listeners, Configuration) ->
     supervisor:start_link({local, ?MODULE}, ?MODULE,
                           [Listeners, Configuration]).
 
-init([{Listeners, SslListeners}, Configuration]) ->
+init([{Listeners, SslListeners0}, Configuration]) ->
     {ok, SocketOpts} = application:get_env(rabbitmq_stomp, tcp_listen_options),
-
-    SslOpts = case SslListeners of
-                  [] -> none;
-                  _  -> rabbit_networking:ensure_ssl()
-              end,
-
+    {SslOpts, SslListeners}
+        = case SslListeners0 of
+              [] -> {none, []};
+              _  -> {rabbit_networking:ensure_ssl(),
+                     case rabbit_networking:poodle_check('STOMP') of
+                         ok     -> SslListeners0;
+                         danger -> []
+                     end}
+          end,
     {ok, {{one_for_all, 10, 10},
           [{rabbit_stomp_client_sup_sup,
             {rabbit_client_sup, start_link,
index d91ba433e3b1fbd2879ee2328bcac3c00d70dae4..bb8530ea0b9b789a0164110222eadfa4418b74da 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_util).
 
--export([parse_message_id/1, durable_subscription_queue/2]).
+-export([parse_message_id/1, subscription_queue_name/2]).
 -export([longstr_field/2]).
 -export([ack_mode/1, consumer_tag_reply_to/1, consumer_tag/1, message_headers/1,
          headers_post_process/1, headers/5, message_properties/1, tag_to_id/1,
@@ -277,14 +277,14 @@ format_destination(Exchange, RoutingKey) ->
 %% Destination Parsing
 %%--------------------------------------------------------------------
 
-durable_subscription_queue(Destination, SubscriptionId) ->
+subscription_queue_name(Destination, SubscriptionId) ->
     %% We need a queue name that a) can be derived from the
     %% Destination and SubscriptionId, and b) meets the constraints on
     %% AMQP queue names. It doesn't need to be secure; we use md5 here
     %% simply as a convenient means to bound the length.
     rabbit_guid:string(
       erlang:md5(term_to_binary({Destination, SubscriptionId})),
-      "stomp.dsub").
+      "stomp-subscription").
 
 %% ---- Helpers ----
 
index 0b24c4207f27d778b4ec9052693733d14ca36c9c..4c47cd3de9646a1d022822767cfca6890f1ea0d0 100644 (file)
@@ -6,14 +6,15 @@ import time
 class TestAck(base.BaseTest):
 
     def test_ack_client(self):
-        d = "/queue/ack-test"
+        destination = "/queue/ack-test"
 
         # subscribe and send message
         self.listener.reset(2) ## expecting 2 messages
-        self.conn.subscribe(destination=d, ack='client',
+        self.subscribe_dest(self.conn, destination, None,
+                            ack='client',
                             headers={'prefetch-count': '10'})
-        self.conn.send("test1", destination=d)
-        self.conn.send("test2", destination=d)
+        self.conn.send(destination, "test1")
+        self.conn.send(destination, "test2")
         self.assertTrue(self.listener.await(4), "initial message not received")
         self.assertEquals(2, len(self.listener.messages))
 
@@ -26,37 +27,39 @@ class TestAck(base.BaseTest):
             listener2 = base.WaitableListener()
             listener2.reset(2)
             conn2.set_listener('', listener2)
-            conn2.subscribe(destination=d, ack='client',
-                            headers={'prefetch-count': '10'})
+            self.subscribe_dest(conn2, destination, None,
+                                ack='client',
+                                headers={'prefetch-count': '10'})
             self.assertTrue(listener2.await(), "message not received again")
             self.assertEquals(2, len(listener2.messages))
 
             # now ack only the last message - expecting cumulative behaviour
-            mid = listener2.messages[1]['headers']['message-id']
-            conn2.ack({'message-id':mid})
+            mid = listener2.messages[1]['headers'][self.ack_id_source_header]
+            self.ack_message(conn2, mid, None)
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
         # now reconnect again, shouldn't see the message
         conn3 = self.create_connection()
         try:
             listener3 = base.WaitableListener()
             conn3.set_listener('', listener3)
-            conn3.subscribe(destination=d)
+            self.subscribe_dest(conn3, destination, None)
             self.assertFalse(listener3.await(3),
                              "unexpected message. ACK not working?")
         finally:
-            conn3.stop()
+            conn3.disconnect()
 
     def test_ack_client_individual(self):
-        d = "/queue/ack-test-individual"
+        destination = "/queue/ack-test-individual"
 
         # subscribe and send message
         self.listener.reset(2) ## expecting 2 messages
-        self.conn.subscribe(destination=d, ack='client-individual',
+        self.subscribe_dest(self.conn, destination, None,
+                            ack='client-individual',
                             headers={'prefetch-count': '10'})
-        self.conn.send("test1", destination=d)
-        self.conn.send("test2", destination=d)
+        self.conn.send(destination, "test1")
+        self.conn.send(destination, "test2")
         self.assertTrue(self.listener.await(4), "Both initial messages not received")
         self.assertEquals(2, len(self.listener.messages))
 
@@ -69,8 +72,9 @@ class TestAck(base.BaseTest):
             listener2 = base.WaitableListener()
             listener2.reset(2) ## expect 2 messages
             conn2.set_listener('', listener2)
-            conn2.subscribe(destination=d, ack='client-individual',
-                            headers={'prefetch-count': '10'})
+            self.subscribe_dest(conn2, destination, None,
+                                ack='client-individual',
+                                headers={'prefetch-count': '10'})
             self.assertTrue(listener2.await(2.5), "Did not receive 2 messages")
             self.assertEquals(2, len(listener2.messages), "Not exactly 2 messages received")
 
@@ -79,13 +83,13 @@ class TestAck(base.BaseTest):
             mid = None
             for ind in range(nummsgs):
                 if listener2.messages[ind]['message']=="test2":
-                    mid = listener2.messages[ind]['headers']['message-id']
+                    mid = listener2.messages[ind]['headers'][self.ack_id_source_header]
                     self.assertEquals(1, ind, 'Expecting test2 to be second message')
                     break
             self.assertTrue(mid, "Did not find test2 message id.")
-            conn2.ack({'message-id':mid})
+            self.ack_message(conn2, mid, None)
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
         # now reconnect again, shouldn't see the message
         conn3 = self.create_connection()
@@ -93,21 +97,21 @@ class TestAck(base.BaseTest):
             listener3 = base.WaitableListener()
             listener3.reset(2) ## expecting a single message, but wait for two
             conn3.set_listener('', listener3)
-            conn3.subscribe(destination=d)
+            self.subscribe_dest(conn3, destination, None)
             self.assertFalse(listener3.await(2.5),
                              "Expected to see only one message. ACK not working?")
             self.assertEquals(1, len(listener3.messages), "Expecting exactly one message")
             self.assertEquals("test1", listener3.messages[0]['message'], "Unexpected message remains")
         finally:
-            conn3.stop()
+            conn3.disconnect()
 
     def test_ack_client_tx(self):
-        d = "/queue/ack-test-tx"
+        destination = "/queue/ack-test-tx"
 
         # subscribe and send message
         self.listener.reset()
-        self.conn.subscribe(destination=d, ack='client')
-        self.conn.send("test", destination=d)
+        self.subscribe_dest(self.conn, destination, None, ack='client')
+        self.conn.send(destination, "test")
         self.assertTrue(self.listener.await(3), "initial message not received")
         self.assertEquals(1, len(self.listener.messages))
 
@@ -121,77 +125,94 @@ class TestAck(base.BaseTest):
             listener2 = base.WaitableListener()
             conn2.set_listener('', listener2)
             conn2.begin(transaction=tx)
-            conn2.subscribe(destination=d, ack='client')
+            self.subscribe_dest(conn2, destination, None, ack='client')
             self.assertTrue(listener2.await(), "message not received again")
             self.assertEquals(1, len(listener2.messages))
 
             # now ack
-            mid = listener2.messages[0]['headers']['message-id']
-            conn2.ack({'message-id':mid, 'transaction':tx})
+            mid = listener2.messages[0]['headers'][self.ack_id_source_header]
+            self.ack_message(conn2, mid, None, transaction=tx)
 
             #now commit
             conn2.commit(transaction=tx)
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
         # now reconnect again, shouldn't see the message
         conn3 = self.create_connection()
         try:
             listener3 = base.WaitableListener()
             conn3.set_listener('', listener3)
-            conn3.subscribe(destination=d)
+            self.subscribe_dest(conn3, destination, None)
             self.assertFalse(listener3.await(3),
                              "unexpected message. TX ACK not working?")
         finally:
-            conn3.stop()
+            conn3.disconnect()
 
     def test_topic_prefetch(self):
-        d = "/topic/prefetch-test"
+        destination = "/topic/prefetch-test"
 
         # subscribe and send message
         self.listener.reset(6) ## expect 6 messages
-        self.conn.subscribe(destination=d, ack='client',
+        self.subscribe_dest(self.conn, destination, None,
+                            ack='client',
                             headers={'prefetch-count': '5'})
 
         for x in range(10):
-            self.conn.send("test" + str(x), destination=d)
+            self.conn.send(destination, "test" + str(x))
 
         self.assertFalse(self.listener.await(3),
                          "Should not have been able to see 6 messages")
         self.assertEquals(5, len(self.listener.messages))
 
     def test_nack(self):
-        d = "/queue/nack-test"
+        destination = "/queue/nack-test"
 
         #subscribe and send
-        self.conn.subscribe(destination=d, ack='client-individual')
-        self.conn.send("nack-test", destination=d)
+        self.subscribe_dest(self.conn, destination, None,
+                            ack='client-individual')
+        self.conn.send(destination, "nack-test")
 
         self.assertTrue(self.listener.await(), "Not received message")
-        message_id = self.listener.messages[0]['headers']['message-id']
+        message_id = self.listener.messages[0]['headers'][self.ack_id_source_header]
         self.listener.reset()
 
-        self.conn.send_frame("NACK", {"message-id" : message_id})
+        self.nack_message(self.conn, message_id, None)
         self.assertTrue(self.listener.await(), "Not received message after NACK")
-        message_id = self.listener.messages[0]['headers']['message-id']
-        self.conn.ack({'message-id' : message_id})
+        message_id = self.listener.messages[0]['headers'][self.ack_id_source_header]
+        self.ack_message(self.conn, message_id, None)
 
     def test_nack_multi(self):
-        d = "/queue/nack-multi"
+        destination = "/queue/nack-multi"
 
         self.listener.reset(2)
 
         #subscribe and send
-        self.conn.subscribe(destination=d, ack='client',
+        self.subscribe_dest(self.conn, destination, None,
+                            ack='client',
                             headers = {'prefetch-count' : '10'})
-        self.conn.send("nack-test1", destination=d)
-        self.conn.send("nack-test2", destination=d)
+        self.conn.send(destination, "nack-test1")
+        self.conn.send(destination, "nack-test2")
 
         self.assertTrue(self.listener.await(), "Not received messages")
-        message_id = self.listener.messages[1]['headers']['message-id']
+        message_id = self.listener.messages[1]['headers'][self.ack_id_source_header]
         self.listener.reset(2)
 
-        self.conn.send_frame("NACK", {"message-id" : message_id})
+        self.nack_message(self.conn, message_id, None)
         self.assertTrue(self.listener.await(), "Not received message again")
-        message_id = self.listener.messages[1]['headers']['message-id']
-        self.conn.ack({'message-id' : message_id})
+        message_id = self.listener.messages[1]['headers'][self.ack_id_source_header]
+        self.ack_message(self.conn, message_id, None)
+
+    def test_nack_without_requeueing(self):
+        destination = "/queue/nack-test-no-requeue"
+
+        self.subscribe_dest(self.conn, destination, None,
+                            ack='client-individual')
+        self.conn.send(destination, "nack-test")
+
+        self.assertTrue(self.listener.await(), "Not received message")
+        message_id = self.listener.messages[0]['headers'][self.ack_id_source_header]
+        self.listener.reset()
+
+        self.conn.send_frame("NACK", {self.ack_id_header: message_id, "requeue": False})
+        self.assertFalse(self.listener.await(4), "Received message after NACK with requeue = False")
index 4db8433d065d284886d14925b97d5fb143c58adc..e3d5819767736e7862665210708d2c0d886c56a4 100644 (file)
@@ -6,18 +6,84 @@ import threading
 
 class BaseTest(unittest.TestCase):
 
-   def create_connection(self, version=None, heartbeat=None):
-       conn = stomp.Connection(user="guest", passcode="guest",
-                               version=version, heartbeat=heartbeat)
+   def create_connection_obj(self, version='1.0', vhost='/', heartbeats=(0, 0)):
+       if version == '1.0':
+           conn = stomp.StompConnection10()
+           self.ack_id_source_header = 'message-id'
+           self.ack_id_header = 'message-id'
+       elif version == '1.1':
+           conn = stomp.StompConnection11(vhost=vhost,
+                                          heartbeats=heartbeats)
+           self.ack_id_source_header = 'message-id'
+           self.ack_id_header = 'message-id'
+       elif version == '1.2':
+           conn = stomp.StompConnection12(vhost=vhost,
+                                          heartbeats=heartbeats)
+           self.ack_id_source_header = 'ack'
+           self.ack_id_header = 'id'
+       else:
+           conn = stomp.StompConnection12(vhost=vhost,
+                                          heartbeats=heartbeats)
+           conn.version = version
+       return conn
+
+   def create_connection(self, user='guest', passcode='guest', wait=True, **kwargs):
+       conn = self.create_connection_obj(**kwargs)
        conn.start()
-       conn.connect()
+       conn.connect(user, passcode, wait=wait)
        return conn
 
+   def subscribe_dest(self, conn, destination, sub_id, **kwargs):
+       if type(conn) is stomp.StompConnection10:
+           # 'id' is optional in STOMP 1.0.
+           if sub_id != None:
+               kwargs['id'] = sub_id
+           conn.subscribe(destination, **kwargs)
+       else:
+           # 'id' is required in STOMP 1.1+.
+           if sub_id == None:
+               sub_id = 'ctag'
+           conn.subscribe(destination, sub_id, **kwargs)
+
+   def unsubscribe_dest(self, conn, destination, sub_id, **kwargs):
+       if type(conn) is stomp.StompConnection10:
+           # 'id' is optional in STOMP 1.0.
+           if sub_id != None:
+               conn.unsubscribe(id=sub_id, **kwargs)
+           else:
+               conn.unsubscribe(destination=destination, **kwargs)
+       else:
+           # 'id' is required in STOMP 1.1+.
+           if sub_id == None:
+               sub_id = 'ctag'
+           conn.unsubscribe(sub_id, **kwargs)
+
+   def ack_message(self, conn, msg_id, sub_id, **kwargs):
+       if type(conn) is stomp.StompConnection10:
+           conn.ack(msg_id, **kwargs)
+       elif type(conn) is stomp.StompConnection11:
+           if sub_id == None:
+               sub_id = 'ctag'
+           conn.ack(msg_id, sub_id, **kwargs)
+       elif type(conn) is stomp.StompConnection12:
+           conn.ack(msg_id, **kwargs)
+
+   def nack_message(self, conn, msg_id, sub_id, **kwargs):
+       if type(conn) is stomp.StompConnection10:
+           # Normally unsupported by STOMP 1.0.
+           conn.send_frame("NACK", {"message-id": msg_id})
+       elif type(conn) is stomp.StompConnection11:
+           if sub_id == None:
+               sub_id = 'ctag'
+           conn.nack(msg_id, sub_id, **kwargs)
+       elif type(conn) is stomp.StompConnection12:
+           conn.nack(msg_id, **kwargs)
+
    def create_subscriber_connection(self, dest):
        conn = self.create_connection()
        listener = WaitableListener()
        conn.set_listener('', listener)
-       conn.subscribe(destination=dest, receipt="sub.receipt")
+       self.subscribe_dest(conn, dest, None, receipt="sub.receipt")
        listener.await()
        self.assertEquals(1, len(listener.receipts))
        listener.reset()
@@ -30,13 +96,14 @@ class BaseTest(unittest.TestCase):
 
    def tearDown(self):
         if self.conn.is_connected():
+            self.conn.disconnect()
             self.conn.stop()
 
    def simple_test_send_rec(self, dest, route = None):
         self.listener.reset()
 
-        self.conn.subscribe(destination=dest)
-        self.conn.send("foo", destination=dest)
+        self.subscribe_dest(self.conn, dest, None)
+        self.conn.send(dest, "foo")
 
         self.assertTrue(self.listener.await(), "Timeout, no message received")
 
index c9e4ad5aaa307b8cacb899dee33230d388f690c1..d802bc666d37534cb74dc66bc87881d7c49a06c2 100644 (file)
@@ -14,7 +14,8 @@ class TestConnectOptions(base.BaseTest):
         new_conn.set_listener('', listener)
 
         new_conn.start() # not going to issue connect
-        new_conn.subscribe(destination="/topic/implicit", id='sub_implicit', receipt='implicit')
+        self.subscribe_dest(new_conn, "/topic/implicit", 'sub_implicit',
+                            receipt='implicit')
 
         try:
             self.assertTrue(listener.await(5))
index 18dec83bd2f8c7daddf14d4d2871eb2cf9dc9740..b1d0cd1914766fa97ef064d7dfe9b915e36f5be1 100644 (file)
@@ -25,7 +25,8 @@ class TestExchange(base.BaseTest):
     def test_invalid_exchange(self):
         ''' Test invalid exchange error '''
         self.listener.reset(1)
-        self.conn.subscribe(destination="/exchange/does.not.exist")
+        self.subscribe_dest(self.conn, "/exchange/does.not.exist", None,
+                            ack="auto")
         self.assertListener("Expecting an error", numErrs=1)
         err = self.listener.errors[0]
         self.assertEquals("not_found", err['headers']['message'])
@@ -49,15 +50,15 @@ class TestQueue(base.BaseTest):
 
     def test_send_receive(self):
         ''' Test basic send/receive for /queue '''
-        d = '/queue/test'
-        self.simple_test_send_rec(d)
+        destination = '/queue/test'
+        self.simple_test_send_rec(destination)
 
     def test_send_receive_in_other_conn(self):
         ''' Test send in one connection, receive in another '''
-        d = '/queue/test2'
+        destination = '/queue/test2'
 
         # send
-        self.conn.send("hello", destination=d)
+        self.conn.send(destination, "hello")
 
         # now receive
         conn2 = self.create_connection()
@@ -65,19 +66,19 @@ class TestQueue(base.BaseTest):
             listener2 = base.WaitableListener()
             conn2.set_listener('', listener2)
 
-            conn2.subscribe(destination=d)
+            self.subscribe_dest(conn2, destination, None, ack="auto")
             self.assertTrue(listener2.await(10), "no receive")
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
     def test_send_receive_in_other_conn_with_disconnect(self):
         ''' Test send, disconnect, receive '''
-        d = '/queue/test3'
+        destination = '/queue/test3'
 
         # send
-        self.conn.send("hello thar", destination=d, receipt="foo")
+        self.conn.send(destination, "hello thar", receipt="foo")
         self.listener.await(3)
-        self.conn.stop()
+        self.conn.disconnect()
 
         # now receive
         conn2 = self.create_connection()
@@ -85,24 +86,24 @@ class TestQueue(base.BaseTest):
             listener2 = base.WaitableListener()
             conn2.set_listener('', listener2)
 
-            conn2.subscribe(destination=d)
+            self.subscribe_dest(conn2, destination, None, ack="auto")
             self.assertTrue(listener2.await(10), "no receive")
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
 
     def test_multi_subscribers(self):
         ''' Test multiple subscribers against a single /queue destination '''
-        d = '/queue/test-multi'
+        destination = '/queue/test-multi'
 
         ## set up two subscribers
-        conn1, listener1 = self.create_subscriber_connection(d)
-        conn2, listener2 = self.create_subscriber_connection(d)
+        conn1, listener1 = self.create_subscriber_connection(destination)
+        conn2, listener2 = self.create_subscriber_connection(destination)
 
         try:
             ## now send
-            self.conn.send("test1", destination=d)
-            self.conn.send("test2", destination=d)
+            self.conn.send(destination, "test1")
+            self.conn.send(destination, "test2")
 
             ## expect both consumers to get a message?
             self.assertTrue(listener1.await(2))
@@ -112,16 +113,16 @@ class TestQueue(base.BaseTest):
             self.assertEquals(1, len(listener2.messages),
                               "unexpected message count")
         finally:
-            conn1.stop()
-            conn2.stop()
+            conn1.disconnect()
+            conn2.disconnect()
 
     def test_send_with_receipt(self):
-        d = '/queue/test-receipt'
+        destination = '/queue/test-receipt'
         def noop(): pass
-        self.__test_send_receipt(d, noop, noop)
+        self.__test_send_receipt(destination, noop, noop)
 
     def test_send_with_receipt_tx(self):
-        d = '/queue/test-receipt-tx'
+        destination = '/queue/test-receipt-tx'
         tx = 'receipt.tx'
 
         def before():
@@ -131,19 +132,19 @@ class TestQueue(base.BaseTest):
             self.assertFalse(self.listener.await(1))
             self.conn.commit(transaction=tx)
 
-        self.__test_send_receipt(d, before, after, {'transaction': tx})
+        self.__test_send_receipt(destination, before, after, {'transaction': tx})
 
     def test_interleaved_receipt_no_receipt(self):
         ''' Test i-leaved receipt/no receipt, no-r bracketed by rs '''
 
-        d = '/queue/ir'
+        destination = '/queue/ir'
 
         self.listener.reset(5)
 
-        self.conn.subscribe(destination=d)
-        self.conn.send('first', destination=d, receipt='a')
-        self.conn.send('second', destination=d)
-        self.conn.send('third', destination=d, receipt='b')
+        self.subscribe_dest(self.conn, destination, None, ack="auto")
+        self.conn.send(destination, 'first', receipt='a')
+        self.conn.send(destination, 'second')
+        self.conn.send(destination, 'third', receipt='b')
 
         self.assertListener("Missing messages/receipts", numMsgs=3, numRcts=2, timeout=3)
 
@@ -152,18 +153,18 @@ class TestQueue(base.BaseTest):
     def test_interleaved_receipt_no_receipt_tx(self):
         ''' Test i-leaved receipt/no receipt, no-r bracketed by r+xactions '''
 
-        d = '/queue/ir'
+        destination = '/queue/ir'
         tx = 'tx.ir'
 
         # three messages and two receipts
         self.listener.reset(5)
 
-        self.conn.subscribe(destination=d)
+        self.subscribe_dest(self.conn, destination, None, ack="auto")
         self.conn.begin(transaction=tx)
 
-        self.conn.send('first', destination=d, receipt='a', transaction=tx)
-        self.conn.send('second', destination=d, transaction=tx)
-        self.conn.send('third', destination=d, receipt='b', transaction=tx)
+        self.conn.send(destination, 'first', receipt='a', transaction=tx)
+        self.conn.send(destination, 'second', transaction=tx)
+        self.conn.send(destination, 'third', receipt='b', transaction=tx)
         self.conn.commit(transaction=tx)
 
         self.assertListener("Missing messages/receipts", numMsgs=3, numRcts=2, timeout=40)
@@ -176,14 +177,14 @@ class TestQueue(base.BaseTest):
     def test_interleaved_receipt_no_receipt_inverse(self):
         ''' Test i-leaved receipt/no receipt, r bracketed by no-rs '''
 
-        d = '/queue/ir'
+        destination = '/queue/ir'
 
         self.listener.reset(4)
 
-        self.conn.subscribe(destination=d)
-        self.conn.send('first', destination=d)
-        self.conn.send('second', destination=d, receipt='a')
-        self.conn.send('third', destination=d)
+        self.subscribe_dest(self.conn, destination, None, ack="auto")
+        self.conn.send(destination, 'first')
+        self.conn.send(destination, 'second', receipt='a')
+        self.conn.send(destination, 'third')
 
         self.assertListener("Missing messages/receipt", numMsgs=3, numRcts=1, timeout=3)
 
@@ -199,7 +200,7 @@ class TestQueue(base.BaseTest):
         for x in range(0, count):
             receipt = "test" + str(x)
             expected_receipts.add(receipt)
-            self.conn.send("test receipt", destination=destination,
+            self.conn.send(destination, "test receipt",
                            receipt=receipt, headers=headers)
         after()
 
@@ -221,16 +222,16 @@ class TestTopic(base.BaseTest):
 
       def test_send_receive(self):
         ''' Test basic send/receive for /topic '''
-        d = '/topic/test'
-        self.simple_test_send_rec(d)
+        destination = '/topic/test'
+        self.simple_test_send_rec(destination)
 
       def test_send_multiple(self):
           ''' Test /topic with multiple consumers '''
-          d = '/topic/multiple'
+          destination = '/topic/multiple'
 
           ## set up two subscribers
-          conn1, listener1 = self.create_subscriber_connection(d)
-          conn2, listener2 = self.create_subscriber_connection(d)
+          conn1, listener1 = self.create_subscriber_connection(destination)
+          conn2, listener2 = self.create_subscriber_connection(destination)
 
           try:
               ## listeners are expecting 2 messages
@@ -238,8 +239,8 @@ class TestTopic(base.BaseTest):
               listener2.reset(2)
 
               ## now send
-              self.conn.send("test1", destination=d)
-              self.conn.send("test2", destination=d)
+              self.conn.send(destination, "test1")
+              self.conn.send(destination, "test2")
 
               ## expect both consumers to get both messages
               self.assertTrue(listener1.await(5))
@@ -249,8 +250,38 @@ class TestTopic(base.BaseTest):
               self.assertEquals(2, len(listener2.messages),
                                 "unexpected message count")
           finally:
-              conn1.stop()
-              conn2.stop()
+              conn1.disconnect()
+              conn2.disconnect()
+
+      def test_send_multiple_with_a_large_message(self):
+          ''' Test /topic with multiple consumers and a large (16MB) message '''
+          destination = '/topic/16mb'
+          # payload size
+          s = 1024 * 1024 * 16
+          message = 'x' * s
+
+          conn1, listener1 = self.create_subscriber_connection(destination)
+          conn2, listener2 = self.create_subscriber_connection(destination)
+
+          try:
+              listener1.reset(2)
+              listener2.reset(2)
+
+              self.conn.send(destination, message)
+              self.conn.send(destination, message)
+
+              self.assertTrue(listener1.await(10))
+              self.assertEquals(2, len(listener1.messages),
+                                "unexpected message count")
+              self.assertTrue(len(listener2.messages[0]['message']) == s,
+                              "unexpected message size")
+
+              self.assertTrue(listener2.await(10))
+              self.assertEquals(2, len(listener2.messages),
+                                "unexpected message count")
+          finally:
+              conn1.disconnect()
+              conn2.disconnect()
 
 class TestReplyQueue(base.BaseTest):
 
@@ -268,7 +299,7 @@ class TestReplyQueue(base.BaseTest):
         conn2, listener2 = self.create_subscriber_connection(known)
 
         try:
-            self.conn.send("test", destination=known,
+            self.conn.send(known, "test",
                            headers = {"reply-to": reply})
 
             self.assertTrue(listener2.await(5))
@@ -277,11 +308,11 @@ class TestReplyQueue(base.BaseTest):
             reply_to = listener2.messages[0]['headers']['reply-to']
             self.assertTrue(reply_to.startswith('/reply-queue/'))
 
-            conn2.send("reply", destination=reply_to)
+            conn2.send(reply_to, "reply")
             self.assertTrue(self.listener.await(5))
             self.assertEquals("reply", self.listener.messages[0]['message'])
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
     def test_reuse_reply_queue(self):
         ''' Test re-use of reply-to queue '''
@@ -295,7 +326,7 @@ class TestReplyQueue(base.BaseTest):
             self.assertEquals(1, len(listna.messages))
             reply_to = listna.messages[0]['headers']['reply-to']
             self.assertTrue(reply_to.startswith('/reply-queue/'))
-            cntn.send("reply", destination=reply_to)
+            cntn.send(reply_to, "reply")
 
         ## Client 1 uses pre-supplied connection and listener
         ## Set up clients 2 and 3
@@ -303,9 +334,9 @@ class TestReplyQueue(base.BaseTest):
         conn3, listener3 = self.create_subscriber_connection(known3)
         try:
             self.listener.reset(2)
-            self.conn.send("test2", destination=known2,
+            self.conn.send(known2, "test2",
                            headers = {"reply-to": reply})
-            self.conn.send("test3", destination=known3,
+            self.conn.send(known3, "test3",
                            headers = {"reply-to": reply})
             respond(conn2, listener2)
             respond(conn3, listener3)
@@ -315,8 +346,8 @@ class TestReplyQueue(base.BaseTest):
             self.assertEquals("reply", self.listener.messages[0]['message'])
             self.assertEquals("reply", self.listener.messages[1]['message'])
         finally:
-            conn2.stop()
-            conn3.stop()
+            conn2.disconnect()
+            conn3.disconnect()
 
     def test_perm_reply_queue(self):
         '''As test_reply_queue, but with a non-temp reply queue'''
@@ -330,7 +361,7 @@ class TestReplyQueue(base.BaseTest):
         conn2, listener2 = self.create_subscriber_connection(known)
 
         try:
-            conn1.send("test", destination=known,
+            conn1.send(known, "test",
                        headers = {"reply-to": reply})
 
             self.assertTrue(listener2.await(5))
@@ -339,12 +370,12 @@ class TestReplyQueue(base.BaseTest):
             reply_to = listener2.messages[0]['headers']['reply-to']
             self.assertTrue(reply_to == reply)
 
-            conn2.send("reply", destination=reply_to)
+            conn2.send(reply_to, "reply")
             self.assertTrue(listener1.await(5))
             self.assertEquals("reply", listener1.messages[0]['message'])
         finally:
-            conn1.stop()
-            conn2.stop()
+            conn1.disconnect()
+            conn2.disconnect()
 
 class TestDurableSubscription(base.BaseTest):
 
@@ -356,10 +387,9 @@ class TestDurableSubscription(base.BaseTest):
         if not id:
             id = TestDurableSubscription.ID
 
-        conn.subscribe(destination=dest,
-                       headers    ={'persistent': 'true',
-                                    'receipt': 1,
-                                    'id': id})
+        self.subscribe_dest(conn, dest, id, ack="auto",
+                            headers = {'persistent': 'true',
+                                       'receipt': 1})
 
     def __assert_receipt(self, listener=None, pos=None):
         if not listener:
@@ -381,69 +411,69 @@ class TestDurableSubscription(base.BaseTest):
             self.assertEquals(pos, self.listener.messages[0]['msg_no'])
 
     def test_durable_subscription(self):
-        d = '/topic/durable'
+        destination = '/topic/durable'
 
-        self.__subscribe(d)
+        self.__subscribe(destination)
         self.__assert_receipt()
 
         # send first message without unsubscribing
         self.listener.reset(1)
-        self.conn.send("first", destination=d)
+        self.conn.send(destination, "first")
         self.__assert_message("first")
 
         # now unsubscribe (disconnect only)
-        self.conn.unsubscribe(id=TestDurableSubscription.ID)
+        self.unsubscribe_dest(self.conn, destination, TestDurableSubscription.ID)
 
         # send again
         self.listener.reset(2)
-        self.conn.send("second", destination=d)
+        self.conn.send(destination, "second")
 
         # resubscribe and expect receipt
-        self.__subscribe(d)
+        self.__subscribe(destination)
         self.__assert_receipt(pos=1)
         # and message
         self.__assert_message("second", pos=2)
 
         # now unsubscribe (cancel)
-        self.conn.unsubscribe(id=TestDurableSubscription.ID,
+        self.unsubscribe_dest(self.conn, destination, TestDurableSubscription.ID,
                               headers={'persistent': 'true'})
 
         # send again
         self.listener.reset(1)
-        self.conn.send("third", destination=d)
+        self.conn.send(destination, "third")
 
         # resubscribe and expect no message
-        self.__subscribe(d)
+        self.__subscribe(destination)
         self.assertTrue(self.listener.await(3))
         self.assertEquals(0, len(self.listener.messages))
         self.assertEquals(1, len(self.listener.receipts))
 
     def test_share_subscription(self):
-        d = '/topic/durable-shared'
+        destination = '/topic/durable-shared'
 
         conn2 = self.create_connection()
         conn2.set_listener('', self.listener)
 
         try:
-            self.__subscribe(d)
+            self.__subscribe(destination)
             self.__assert_receipt()
             self.listener.reset(1)
-            self.__subscribe(d, conn2)
+            self.__subscribe(destination, conn2)
             self.__assert_receipt()
 
             self.listener.reset(100)
 
             # send 100 messages
             for x in xrange(0, 100):
-                self.conn.send("msg" + str(x), destination=d)
+                self.conn.send(destination, "msg" + str(x))
 
             self.assertTrue(self.listener.await(5))
             self.assertEquals(100, len(self.listener.messages))
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
     def test_separate_ids(self):
-        d = '/topic/durable-separate'
+        destination = '/topic/durable-separate'
 
         conn2 = self.create_connection()
         listener2 = base.WaitableListener()
@@ -451,36 +481,35 @@ class TestDurableSubscription(base.BaseTest):
 
         try:
             # ensure durable subscription exists for each ID
-            self.__subscribe(d)
+            self.__subscribe(destination)
             self.__assert_receipt()
-            self.__subscribe(d, conn2, "other.id")
+            self.__subscribe(destination, conn2, "other.id")
             self.__assert_receipt(listener2)
-            self.conn.unsubscribe(id=TestDurableSubscription.ID)
-            conn2.unsubscribe(id="other.id")
+            self.unsubscribe_dest(self.conn, destination, TestDurableSubscription.ID)
+            self.unsubscribe_dest(conn2, destination, "other.id")
 
             self.listener.reset(101)
             listener2.reset(101) ## 100 messages and 1 receipt
 
             # send 100 messages
             for x in xrange(0, 100):
-                self.conn.send("msg" + str(x), destination=d)
+                self.conn.send(destination, "msg" + str(x))
 
-            self.__subscribe(d)
-            self.__subscribe(d, conn2, "other.id")
+            self.__subscribe(destination)
+            self.__subscribe(destination, conn2, "other.id")
 
             for l in [self.listener, listener2]:
-                self.assertTrue(l.await(10))
+                self.assertTrue(l.await(20))
                 self.assertEquals(100, len(l.messages))
 
         finally:
-            conn2.stop()
+            conn2.disconnect()
 
     def test_durable_subscribe_no_id(self):
-        d = '/topic/durable-invalid'
+        destination = '/topic/durable-invalid'
 
-        self.conn.subscribe(destination=d, headers={'persistent':'true'}),
+        self.conn.send_frame('SUBSCRIBE',
+            {'destination': destination, 'ack': 'auto', 'persistent': 'true'})
         self.listener.await(3)
         self.assertEquals(1, len(self.listener.errors))
         self.assertEquals("Missing Header", self.listener.errors[0]['headers']['message'])
-
-
index 917542928bcf14d3ac6e6964bdea872ff4e6bd17..e52b3ac0622362fc773d1c2f829f91f0a361aa93 100644 (file)
@@ -28,7 +28,7 @@ class TestErrors(base.BaseTest):
 
     def test_unknown_destination(self):
         self.listener.reset()
-        self.conn.send(destination="/something/interesting")
+        self.conn.send("/something/interesting", 'test_unknown_destination')
 
         self.assertTrue(self.listener.await())
         self.assertEquals(1, len(self.listener.errors))
@@ -54,7 +54,7 @@ class TestErrors(base.BaseTest):
 
     def __test_invalid_destination(self, dtype, content):
         self.listener.reset()
-        self.conn.send(destination="/" + dtype + content)
+        self.conn.send("/" + dtype + content, '__test_invalid_destination:' + dtype + content)
 
         self.assertTrue(self.listener.await())
         self.assertEquals(1, len(self.listener.errors))
index 902994e229a64f1e7d9d49ea25f66cf6394f2527..ff9b119841b496644246d5c9d85876df6d3b9a70 100644 (file)
@@ -45,6 +45,16 @@ class TestLifecycle(base.BaseTest):
         d = "/queue/unsub04"
         self.unsub_test(d, self.sub_and_send(d, subid="queid", receipt="unsub.rct"), numRcts=1)
 
+    def test_connect_version_1_0(self):
+        ''' Test CONNECT with version 1.0'''
+        self.conn.disconnect()
+        new_conn = self.create_connection(version="1.0")
+        try:
+            self.assertTrue(new_conn.is_connected())
+        finally:
+            new_conn.disconnect()
+            self.assertFalse(new_conn.is_connected())
+
     def test_connect_version_1_1(self):
         ''' Test CONNECT with version 1.1'''
         self.conn.disconnect()
@@ -55,10 +65,20 @@ class TestLifecycle(base.BaseTest):
             new_conn.disconnect()
             self.assertFalse(new_conn.is_connected())
 
+    def test_connect_version_1_2(self):
+        ''' Test CONNECT with version 1.2'''
+        self.conn.disconnect()
+        new_conn = self.create_connection(version="1.2")
+        try:
+            self.assertTrue(new_conn.is_connected())
+        finally:
+            new_conn.disconnect()
+            self.assertFalse(new_conn.is_connected())
+
     def test_heartbeat_disconnects_client(self):
         ''' Test heart-beat disconnection'''
         self.conn.disconnect()
-        new_conn = self.create_connection(heartbeat="1500,0")
+        new_conn = self.create_connection(version='1.1', heartbeats=(1500, 0))
         try:
             self.assertTrue(new_conn.is_connected())
             time.sleep(1)
@@ -71,37 +91,28 @@ class TestLifecycle(base.BaseTest):
 
     def test_unsupported_version(self):
         ''' Test unsupported version on CONNECT command'''
-        self.bad_connect(stomp.Connection(user="guest",
-                                          passcode="guest",
-                                          version="100.1"),
-                         "Supported versions are 1.0,1.1,1.2\n")
+        self.bad_connect("Supported versions are 1.0,1.1,1.2\n", version='100.1')
 
     def test_bad_username(self):
         ''' Test bad username'''
-        self.bad_connect(stomp.Connection(user="gust",
-                                          passcode="guest"),
-                         "Access refused for user 'gust'\n")
+        self.bad_connect("Access refused for user 'gust'\n", user='gust')
 
     def test_bad_password(self):
         ''' Test bad password'''
-        self.bad_connect(stomp.Connection(user="guest",
-                                          passcode="gust"),
-                         "Access refused for user 'guest'\n")
+        self.bad_connect("Access refused for user 'guest'\n", passcode='gust')
 
     def test_bad_vhost(self):
         ''' Test bad virtual host'''
-        self.bad_connect(stomp.Connection(user="guest",
-                                          passcode="guest",
-                                          virtual_host="//"),
-                         "Virtual host '//' access denied")
+        self.bad_connect("Virtual host '//' access denied", version='1.1', vhost='//')
 
-    def bad_connect(self, new_conn, expected):
+    def bad_connect(self, expected, user='guest', passcode='guest', **kwargs):
         self.conn.disconnect()
+        new_conn = self.create_connection_obj(**kwargs)
         listener = base.WaitableListener()
         new_conn.set_listener('', listener)
         try:
             new_conn.start()
-            new_conn.connect()
+            new_conn.connect(user, passcode)
             self.assertTrue(listener.await())
             self.assertEquals(expected, listener.errors[0]['message'])
         finally:
@@ -136,7 +147,7 @@ class TestLifecycle(base.BaseTest):
 
     def unsub_test(self, dest, verbs, numRcts=0):
         def afterfun():
-            self.conn.send("after-test", destination=dest)
+            self.conn.send(dest, "after-test")
         subverb, unsubverb = verbs
         self.assertListenerAfter(subverb, numMsgs=1,
                            errMsg="FAILED to subscribe and send")
@@ -145,20 +156,13 @@ class TestLifecycle(base.BaseTest):
         self.assertListenerAfter(afterfun,
                            errMsg="Still receiving messages")
 
-    def sub_and_send(self, dest, subid="", receipt=""):
+    def sub_and_send(self, dest, subid=None, receipt=None):
         def subfun():
-            if subid=="":
-                self.conn.subscribe(destination=dest)
-            else:
-                self.conn.subscribe(destination=dest, id=subid)
-            self.conn.send("test", destination=dest)
+            self.subscribe_dest(self.conn, dest, subid)
+            self.conn.send(dest, "test")
         def unsubfun():
-            if subid=="" and receipt=="":
-                self.conn.unsubscribe(destination=dest)
-            elif receipt=="":
-                self.conn.unsubscribe(id=subid)
-            elif subid=="":
-                self.conn.unsubscribe(destination=dest, receipt=receipt)
-            else:
-                self.conn.unsubscribe(id=subid, receipt=receipt)
+            headers = {}
+            if receipt != None:
+                headers['receipt'] = receipt
+            self.unsubscribe_dest(self.conn, dest, subid, **headers)
         return subfun, unsubfun
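
For reference, the pattern these test changes adopt is the stomp.py 4.x call convention: the destination is the first positional argument to send(), credentials are passed to connect() rather than to the Connection constructor, subscriptions carry an explicit id and ack mode, and connections are torn down with disconnect() instead of stop(). A minimal sketch of that convention, assuming a stomp.py 4.x client and a broker on localhost:61613 with guest/guest credentials (illustrative values only, not taken from this change):

    import stomp

    # Connection takes transport settings only; credentials go to connect().
    conn = stomp.Connection(host_and_ports=[('localhost', 61613)])
    conn.start()
    conn.connect('guest', 'guest', wait=True)

    # Destination first, body second; extra keyword arguments become frame headers.
    conn.subscribe(destination='/queue/example', id='sub-1', ack='auto')
    conn.send('/queue/example', 'hello', receipt='r-1')

    conn.disconnect()   # replaces the old conn.stop()
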
diff --git a/rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/non_ssl.config b/rabbitmq-server/plugins-src/rabbitmq-stomp/test/src/non_ssl.config
deleted file mode 100644 (file)
index f0c2ca7..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-[{rabbitmq_stomp, [{default_user, [{login,    "guest"},
-                                   {passcode, "guest"}
-                                  ]},
-                   {implicit_connect, true}
-                  ]}
-].
index 58d5bcf9867db499a71b2a26e9b8dd6315212e75..2e08836381ac212884b2df8615ace5d1ceb9e9ff 100644 (file)
@@ -51,8 +51,8 @@ class TestParsing(unittest.TestCase):
 
 
     def match(self, pattern, data):
-        ''' helper: try to match 'pattern' regexp with 'data' string.
-            Fail test if they don't match.
+        ''' helper: try to match a regexp with a string.
+            Fail test if they do not match.
         '''
         matched = re.match(pattern, data)
         if matched:
@@ -196,47 +196,6 @@ class TestParsing(unittest.TestCase):
         for cd in [self.cd1, self.cd2]:
             self.match(resp, cd.recv(4096))
 
-
-    @connect(['cd'])
-    def test_huge_message(self):
-        ''' Test sending/receiving huge (16MB) message. '''
-        subscribe=( 'SUBSCRIBE\n'
-                    'id: xxx\n'
-                    'destination:/exchange/amq.topic/test_huge_message\n'
-                    '\n\0')
-        self.cd.sendall(subscribe)
-
-        message = 'x' * 1024*1024*16
-
-        self.cd.sendall('SEND\n'
-                        'destination:/exchange/amq.topic/test_huge_message\n'
-                        'content-type:text/plain\n'
-                        '\n'
-                        '%s'
-                        '\0' % message)
-
-        resp=('MESSAGE\n'
-            'subscription:(.*)\n'
-            'destination:/topic/test_huge_message\n'
-            'message-id:(.*)\n'
-            'content-type:text/plain\n'
-            'content-length:%i\n'
-            '\n'
-            '%s(.*)'
-             % (len(message), message[:8000]) )
-
-        recv = []
-        s = 0
-        while len(recv) < 1 or recv[-1][-1] != '\0':
-            buf =  self.cd.recv(4096*16)
-            s += len(buf)
-            recv.append( buf )
-        buf = ''.join(recv)
-
-        # matching 100MB regexp is way too expensive.
-        self.match(resp, buf[:8192])
-        self.assertEqual(len(buf) > len(message), True)
-
     @connect(['cd'])
     def test_message_with_embedded_nulls(self):
         ''' Test sending/receiving message with embedded nulls. '''
index 013f7cb4e499292b40cf3792c75249859e70776d..42c18edbf7ad51fe909e086545c60f460a299b02 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_amqqueue_test).
index 615c6a7b83b75fe441dc5ba78c3d537fa0fc2d53..a67e9aac231100c13a51ccde2127da2141df6634 100644 (file)
 
 run() ->
     [put(K, 0) || K <- [sent, recd, last_sent, last_recd]],
-    put(last_ts, erlang:now()),
+    put(last_ts, os:timestamp()),
     {ok, Pub} = rabbit_stomp_client:connect(),
     {ok, Recv} = rabbit_stomp_client:connect(),
     Self = self(),
-    spawn(fun() -> publish(Self, Pub, 0, erlang:now()) end),
+    spawn(fun() -> publish(Self, Pub, 0, os:timestamp()) end),
     rabbit_stomp_client:send(
       Recv, "SUBSCRIBE", [{"destination", ?DESTINATION}]),
-    spawn(fun() -> recv(Self, Recv, 0, erlang:now()) end),
+    spawn(fun() -> recv(Self, Recv, 0, os:timestamp()) end),
     report().
 
 report() ->
@@ -49,13 +49,13 @@ report() ->
         {sent, C} -> put(sent, C);
         {recd, C} -> put(recd, C)
     end,
-    Diff = timer:now_diff(erlang:now(), get(last_ts)),
+    Diff = timer:now_diff(os:timestamp(), get(last_ts)),
     case Diff > ?MICROS_PER_UPDATE of
         true  -> S = get(sent) - get(last_sent),
                  R = get(recd) - get(last_recd),
                  put(last_sent, get(sent)),
                  put(last_recd, get(recd)),
-                 put(last_ts, erlang:now()),
+                 put(last_ts, os:timestamp()),
                  io:format("Send ~p msg/s | Recv ~p msg/s~n",
                            [trunc(S * ?MICROS_PER_SECOND / Diff),
                             trunc(R * ?MICROS_PER_SECOND / Diff)]);
@@ -67,10 +67,10 @@ publish(Owner, Client, Count, TS) ->
     rabbit_stomp_client:send(
       Client, "SEND", [{"destination", ?DESTINATION}],
       [integer_to_list(Count)]),
-    Diff = timer:now_diff(erlang:now(), TS),
+    Diff = timer:now_diff(os:timestamp(), TS),
     case Diff > ?MICROS_PER_UPDATE_MSG of
         true  -> Owner ! {sent, Count + 1},
-                 publish(Owner, Client, Count + 1, erlang:now());
+                 publish(Owner, Client, Count + 1, os:timestamp());
         false -> publish(Owner, Client, Count + 1, TS)
     end.
 
@@ -79,10 +79,10 @@ recv(Owner, Client0, Count, TS) ->
         rabbit_stomp_client:recv(Client0),
     BodyInt = list_to_integer(binary_to_list(iolist_to_binary(Body))),
     Count = BodyInt,
-    Diff = timer:now_diff(erlang:now(), TS),
+    Diff = timer:now_diff(os:timestamp(), TS),
     case Diff > ?MICROS_PER_UPDATE_MSG of
         true  -> Owner ! {recd, Count + 1},
-                 recv(Owner, Client1, Count + 1, erlang:now());
+                 recv(Owner, Client1, Count + 1, os:timestamp());
         false -> recv(Owner, Client1, Count + 1, TS)
     end.
 
index 2fdca2de21b0cb61852fbbba868d7834fe71db44..25290986f92e1cf26861a23e78b671035f7e05af 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_test).
 
 all_tests() ->
     test_messages_not_dropped_on_disconnect(),
+    test_direct_client_connections_are_not_leaked(),
+    ok.
+
+-define(GARBAGE, <<"bdaf63dda9d78b075c748b740e7c3510ad203b07\nbdaf63dd">>).
+
+count_connections() ->
+    length(supervisor2:which_children(rabbit_stomp_client_sup_sup)).
+
+test_direct_client_connections_are_not_leaked() ->
+    N = count_connections(),
+    lists:foreach(fun (_) ->
+                          {ok, Client = {Socket, _}} = rabbit_stomp_client:connect(),
+                          %% send garbage which trips up the parser
+                          gen_tcp:send(Socket, ?GARBAGE),
+                          rabbit_stomp_client:send(
+                           Client, "LOL", [{"", ""}])
+                  end,
+                  lists:seq(1, 1000)),
+    timer:sleep(5000),
+    N = count_connections(),
     ok.
 
 test_messages_not_dropped_on_disconnect() ->
+    N = count_connections(),
     {ok, Client} = rabbit_stomp_client:connect(),
+    N1 = N + 1,
+    N1 = count_connections(),
     [rabbit_stomp_client:send(
        Client, "SEND", [{"destination", ?DESTINATION}],
        [integer_to_list(Count)]) || Count <- lists:seq(1, 1000)],
     rabbit_stomp_client:disconnect(Client),
     QName = rabbit_misc:r(<<"/">>, queue, <<"bulk-test">>),
     timer:sleep(3000),
+    N = count_connections(),
     rabbit_amqqueue:with(
       QName, fun(Q) ->
                      1000 = pget(messages, rabbit_amqqueue:info(Q, [messages]))
index 34b7472daa681ea1e297934ee247911e76b58ebf..a2cbdf3cc84817c69a2c5c36148b347b02e5e8a0 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_test_frame).
index e7459e33eb1f60be8ce449c0437cb39054100a8f..a25e30647c79ee4b656d3f509645553c52f6e5f0 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_stomp_test_util).
index 08476d5b6be750958d32ea6540bff73f53517e47..b8bb15080ae180dbd172acb79ab13782f5bbee6f 100644 (file)
@@ -7,7 +7,7 @@ class TestReliability(base.BaseTest):
 
     def test_send_and_disconnect(self):
         ''' Test close socket after send does not lose messages '''
-        d = "/queue/reliability"
+        destination = "/queue/reliability"
         pub_conn = self.create_connection()
         try:
             msg = "0" * (128)
@@ -17,12 +17,12 @@ class TestReliability(base.BaseTest):
             listener = base.WaitableListener()
             listener.reset(count)
             self.conn.set_listener('', listener)
-            self.conn.subscribe(destination=d)
+            self.subscribe_dest(self.conn, destination, None)
 
             for x in range(0, count):
-                pub_conn.send(msg + str(x), destination=d)
+                pub_conn.send(destination, msg + str(x))
             time.sleep(2.0)
-            pub_conn.close_socket()
+            pub_conn.disconnect()
 
             if listener.await(30):
                 self.assertEquals(count, len(listener.messages))
index 8a89a7a43ca2c4db841b17756e9834eddf695f14..53636df31ae8aa90677e9f692b1200e510ca0566 100644 (file)
@@ -1,24 +1,29 @@
 import unittest
 import os
+import os.path
+import sys
 
 import stomp
 import base
+import ssl
 
-ssl_key_file = os.path.abspath("test/certs/client/key.pem")
-ssl_cert_file = os.path.abspath("test/certs/client/cert.pem")
-ssl_ca_certs = os.path.abspath("test/certs/testca/cacert.pem")
+
+base_path = os.path.dirname(sys.argv[0])
+
+ssl_key_file = os.path.abspath(base_path + "/../certs/client/key.pem")
+ssl_cert_file = os.path.abspath(base_path + "/../certs/client/cert.pem")
+ssl_ca_certs = os.path.abspath(base_path  + "/../certs/testca/cacert.pem")
 
 class TestSslClient(unittest.TestCase):
 
     def __ssl_connect(self):
-        conn = stomp.Connection(user="guest", passcode="guest",
-                                host_and_ports = [ ('localhost', 61614) ],
+        conn = stomp.Connection(host_and_ports = [ ('localhost', 61614) ],
                                 use_ssl = True, ssl_key_file = ssl_key_file,
                                 ssl_cert_file = ssl_cert_file,
                                 ssl_ca_certs = ssl_ca_certs)
-
+        print "FILE: ", ssl_cert_file
         conn.start()
-        conn.connect()
+        conn.connect("guest", "guest")
         return conn
 
     def __ssl_auth_connect(self):
@@ -32,11 +37,11 @@ class TestSslClient(unittest.TestCase):
 
     def test_ssl_connect(self):
         conn = self.__ssl_connect()
-        conn.stop()
+        conn.disconnect()
 
     def test_ssl_auth_connect(self):
         conn = self.__ssl_auth_connect()
-        conn.stop()
+        conn.disconnect()
 
     def test_ssl_send_receive(self):
         conn = self.__ssl_connect()
@@ -53,7 +58,7 @@ class TestSslClient(unittest.TestCase):
             conn.set_listener('', listener)
 
             d = "/topic/ssl.test"
-            conn.subscribe(destination=d, receipt="sub")
+            conn.subscribe(destination=d, ack="auto", id="ctag", receipt="sub")
 
             self.assertTrue(listener.await(1))
 
@@ -61,7 +66,7 @@ class TestSslClient(unittest.TestCase):
                               listener.receipts[0]['headers']['receipt-id'])
 
             listener.reset(1)
-            conn.send("Hello SSL!", destination=d)
+            conn.send(body="Hello SSL!", destination=d)
 
             self.assertTrue(listener.await())
 
index 49795524e3f358de7151e79c3e017eca3fd19bb6..ddeb1fe1108b472e1656ce2ff2a792e475979cfd 100755 (executable)
@@ -3,7 +3,14 @@
 import test_runner
 
 if __name__ == '__main__':
-    modules = ['parsing', 'destinations', 'lifecycle', 'transactions',
-               'ack', 'errors', 'reliability']
+    modules = [
+        'parsing',
+        'destinations',
+        'lifecycle',
+        'transactions',
+        'ack',
+        'errors',
+        'reliability',
+    ]
     test_runner.run_unittests(modules)
 
index 1d95f7e165ff74d387476f0ed2fdb7835831cea6..d4f166b6b169f606c32fb75d874edfbe584e95c4 100644 (file)
@@ -7,14 +7,14 @@ class TestTransactions(base.BaseTest):
 
     def test_tx_commit(self):
         ''' Test TX with a COMMIT and ensure messages are delivered '''
-        d = "/exchange/amq.fanout"
+        destination = "/exchange/amq.fanout"
         tx = "test.tx"
 
         self.listener.reset()
-        self.conn.subscribe(destination=d)
+        self.subscribe_dest(self.conn, destination, None)
         self.conn.begin(transaction=tx)
-        self.conn.send("hello!", destination=d, transaction=tx)
-        self.conn.send("again!", destination=d)
+        self.conn.send(destination, "hello!", transaction=tx)
+        self.conn.send(destination, "again!")
 
         ## should see the second message
         self.assertTrue(self.listener.await(3))
@@ -31,14 +31,14 @@ class TestTransactions(base.BaseTest):
 
     def test_tx_abort(self):
         ''' Test TX with an ABORT and ensure messages are discarded '''
-        d = "/exchange/amq.fanout"
+        destination = "/exchange/amq.fanout"
         tx = "test.tx"
 
         self.listener.reset()
-        self.conn.subscribe(destination=d)
+        self.subscribe_dest(self.conn, destination, None)
         self.conn.begin(transaction=tx)
-        self.conn.send("hello!", destination=d, transaction=tx)
-        self.conn.send("again!", destination=d)
+        self.conn.send(destination, "hello!", transaction=tx)
+        self.conn.send(destination, "again!")
 
         ## should see the second message
         self.assertTrue(self.listener.await(3))
diff --git a/rabbitmq-server/plugins-src/rabbitmq-test/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-test/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 74a96aec93f3875833025dfd681c4eacff186204..03466a7e56f93e7435f4f26c6ced02677d104cfb 100644 (file)
@@ -51,21 +51,27 @@ full:
        $(MAKE) prepare && \
        { $(MAKE) -C $(BROKER_DIR) run-tests || { OK=false; $(TESTS_FAILED); } } && \
        { $(MAKE) run-qpid-testsuite || { OK=false; $(TESTS_FAILED); } } && \
-       { ( cd $(TEST_DIR) && ant test-suite ) || { OK=false; $(TESTS_FAILED); } } && \
+       { ( cd $(TEST_DIR) && MAKE=$(MAKE) ant test-suite ) || { OK=false; $(TESTS_FAILED); } } && \
        $(MAKE) cleanup && { $$OK || $(TESTS_FAILED); } && $$OK
 
+unit:
+       OK=true && \
+       $(MAKE) prepare && \
+       { $(MAKE) -C $(BROKER_DIR) run-tests || OK=false; } && \
+       $(MAKE) cleanup && $$OK
+
 lite:
        OK=true && \
        $(MAKE) prepare && \
        { $(MAKE) -C $(BROKER_DIR) run-tests || OK=false; } && \
-       { ( cd $(TEST_DIR) && ant test-suite ) || OK=false; } && \
+       { ( cd $(TEST_DIR) && MAKE=$(MAKE) ant test-suite ) || OK=false; } && \
        $(MAKE) cleanup && $$OK
 
 conformance16:
        OK=true && \
        $(MAKE) prepare && \
        { $(MAKE) -C $(BROKER_DIR) run-tests || OK=false; } && \
-       { ( cd $(TEST_DIR) && ant test-suite ) || OK=false; } && \
+       { ( cd $(TEST_DIR) && MAKE=$(MAKE) ant test-suite ) || OK=false; } && \
        $(MAKE) cleanup && $$OK
 
 qpid_testsuite:
@@ -170,3 +176,4 @@ cleanup:
 
 create_ssl_certs:
        $(MAKE) -C certs DIR=$(SSL_CERTS_DIR) clean all
+
index 3afe8266b6ba88f05d56e106cc7329f065b40c04..9b1950573c8f16c116a9e8317fc38716067a7c62 100644 (file)
@@ -1,5 +1,6 @@
 Useful targets:
 
+$ make unit # runs the Erlang unit tests
 $ make lite # runs the Erlang unit tests and the Java client / functional tests
 $ make full # runs both the above plus the QPid test suite
 $ make test # runs the Erlang multi-node integration tests
index d9b0f97c5cf8004c61701ac9595d76e0a9a3c236..8808931fda7e588dc1ea5f13a7a3341c3424dcc2 100644 (file)
@@ -1,6 +1,7 @@
 DEPS:=rabbitmq-erlang-client
 FILTER:=all
 COVER:=false
+WITH_BROKER_TEST_COMMANDS:=rabbit_test_runner:run_in_broker(\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\")
 STANDALONE_TEST_COMMANDS:=rabbit_test_runner:run_multi(\"$(UMBRELLA_BASE_DIR)/rabbitmq-server\",\"$(PACKAGE_DIR)/test/ebin\",\"$(FILTER)\",$(COVER),none)
 
 ## Require R15B to compile inet_proxy_dist since it requires includes
index 458966c5aa0dc30d113ef8a9ad1b9954faca7135..847ef2ec3ab8136efe8fff93880a44fbb7678484 100644 (file)
@@ -66,7 +66,7 @@ do_setup(Kernel, Node, Type, MyNode, LongOrShortNames,SetupTime) ->
                     %% Modification START
                     ProxyPort = case TcpPort >= 25672 andalso TcpPort < 25700
                                     andalso inet_tcp_proxy:is_enabled() of
-                                    true  -> TcpPort + 10000;
+                                    true  -> TcpPort + 5000;
                                     false -> TcpPort
                                 end,
                    case inet_tcp:connect(Ip, ProxyPort, 
index 18ffcc280080f71f09f1d21d1945e0a13bb7b101..28d58e0917955d1b738b3d763b8a6b5864819d05 100644 (file)
@@ -61,7 +61,7 @@ error_handler(Thunk) ->
 go() ->
     ets:new(?TABLE, [public, named_table]),
     {ok, Port} = application:get_env(kernel, inet_dist_listen_min),
-    ProxyPort = Port + 10000,
+    ProxyPort = Port + 5000,
     {ok, Sock} = gen_tcp:listen(ProxyPort, [inet,
                                             {reuseaddr, true}]),
     accept_loop(Sock, Port).
index 1bab158cf72833144b480a2d5535d10406223ffd..a79ea9fafeafffbc704d9253e028eb535deeb769 100644 (file)
@@ -62,6 +62,7 @@ name() ->
 %%----------------------------------------------------------------------------
 
 init([]) ->
+    net_kernel:monitor_nodes(true),
     {ok, #state{ports   = dict:new(),
                 pending = []}}.
 
@@ -90,6 +91,13 @@ handle_call(_Req, _From, State) ->
 handle_cast(_C, State) ->
     {noreply, State}.
 
+handle_info({nodedown, Node}, State = #state{ports = Ports}) ->
+    Ports1 = dict:filter(
+               fun (_, {From, To}) ->
+                       Node =/= From andalso Node =/= To
+               end, Ports),
+    {noreply, State#state{ports = Ports1}};
+
 handle_info(_I, State) ->
     {noreply, State}.
 
index ce82e938a14901060f8da2eb72a907c140b3e415..f286733bbd9b768fc4448140a47e3cf8d55047ca 100644 (file)
 -export([cluster/2, cluster_ab/1, cluster_abc/1, start_ab/1, start_abc/1]).
 -export([start_connections/1, build_cluster/1]).
 -export([ha_policy_all/1, ha_policy_two_pos/1]).
--export([start_nodes/2, start_nodes/3, add_to_cluster/2]).
+-export([start_nodes/2, start_nodes/3, add_to_cluster/2,
+         rabbitmqctl/2, rabbitmqctl_fail/2]).
 -export([stop_nodes/1, start_node/1, stop_node/1, kill_node/1, restart_node/1,
-         execute/1]).
+         start_node_fail/1, execute/1]).
 -export([cover_work_factor/2]).
 
 -import(rabbit_test_util, [set_ha_policy/3, set_ha_policy/4, a2b/1]).
--import(rabbit_misc, [pget/2]).
+-import(rabbit_misc, [pget/2, pget/3]).
 
 -define(INITIAL_KEYS, [cover, base, server, plugins]).
--define(NON_RUNNING_KEYS, ?INITIAL_KEYS ++ [nodename, port]).
+-define(NON_RUNNING_KEYS, ?INITIAL_KEYS ++ [nodename, port, mnesia_dir]).
 
 cluster_ab(InitialCfg)  -> cluster(InitialCfg, [a, b]).
 cluster_abc(InitialCfg) -> cluster(InitialCfg, [a, b, c]).
@@ -52,7 +53,9 @@ start_nodes(InitialCfg0, NodeNames, FirstPort) ->
                       [{_, _}|_] -> [InitialCfg0 || _ <- NodeNames];
                       _          -> InitialCfg0
                   end,
-    Nodes = [[{nodename, N}, {port, P} | strip_non_initial(Cfg)]
+    Nodes = [[{nodename, N}, {port, P},
+              {mnesia_dir, rabbit_misc:format("rabbitmq-~s-mnesia", [N])} |
+              strip_non_initial(Cfg)]
              || {N, P, Cfg} <- lists:zip3(NodeNames, Ports, InitialCfgs)],
     [start_node(Node) || Node <- Nodes].
 
@@ -68,54 +71,23 @@ strip_non_initial(Cfg) ->
 strip_running(Cfg) ->
     [{K, V} || {K, V} <- Cfg, lists:member(K, ?NON_RUNNING_KEYS)].
 
-enable_plugins(Cfg) -> enable_plugins(pget(plugins, Cfg), pget(server, Cfg)).
+enable_plugins(Cfg) ->
+    enable_plugins(pget(plugins, Cfg), pget(server, Cfg), Cfg).
 
-enable_plugins(none, _Server) -> ok;
-enable_plugins(Dir, Server) ->
-    Env = plugins_env(Dir),
-    R = execute(Env, Server ++ "/scripts/rabbitmq-plugins list -m"),
-    Plugins = string:tokens(R, "\n"),
-    [execute(Env, {Server ++ "/scripts/rabbitmq-plugins enable ~s", [Plugin]})
-     || Plugin <- Plugins],
+enable_plugins(none, _Server, _Cfg) -> ok;
+enable_plugins(_Dir, Server, Cfg) ->
+    R = execute(Cfg, Server ++ "/scripts/rabbitmq-plugins list -m"),
+    Plugins = string:join(string:tokens(R, "\n"), " "),
+    execute(Cfg, {Server ++ "/scripts/rabbitmq-plugins set --offline ~s",
+                  [Plugins]}),
     ok.
 
-plugins_env(none) ->
-    [{"RABBITMQ_ENABLED_PLUGINS_FILE", "/does-not-exist"}];
-plugins_env(Dir) ->
-    [{"RABBITMQ_PLUGINS_DIR",          {"~s/plugins", [Dir]}},
-     {"RABBITMQ_PLUGINS_EXPAND_DIR",   {"~s/expand", [Dir]}},
-     {"RABBITMQ_ENABLED_PLUGINS_FILE", {"~s/enabled_plugins", [Dir]}}].
-
-start_node(Cfg) ->
-    Nodename = pget(nodename, Cfg),
-    Port = pget(port, Cfg),
-    Base = pget(base, Cfg),
+start_node(Cfg0) ->
+    Node = rabbit_nodes:make(pget(nodename, Cfg0)),
+    Cfg = [{node, Node} | Cfg0],
     Server = pget(server, Cfg),
-    PidFile = rabbit_misc:format("~s/~s.pid", [Base, Nodename]),
-    Linked =
-        execute_bg(
-          [{"RABBITMQ_MNESIA_BASE", {"~s/rabbitmq-~s-mnesia", [Base,Nodename]}},
-           {"RABBITMQ_LOG_BASE",    {"~s", [Base]}},
-           {"RABBITMQ_NODENAME",    {"~s", [Nodename]}},
-           {"RABBITMQ_NODE_PORT",   {"~B", [Port]}},
-           {"RABBITMQ_PID_FILE",    PidFile},
-           {"RABBITMQ_CONFIG_FILE", "/some/path/which/does/not/exist"},
-           {"RABBITMQ_ALLOW_INPUT", "1"}, %% Needed to make it close on our exit
-           %% Bit of a hack - only needed for mgmt tests.
-           {"RABBITMQ_SERVER_START_ARGS",
-            {"-rabbitmq_management listener [{port,1~B}]", [Port]}},
-           {"RABBITMQ_SERVER_ERL_ARGS",
-            %% Next two lines are defaults
-            {"+K true +A30 +P 1048576 "
-             "-kernel inet_default_connect_options [{nodelay,true}] "
-             %% Some tests need to be able to make distribution unhappy
-             "-pa ~s/../rabbitmq-test/ebin "
-             "-proto_dist inet_proxy", [Server]}}
-           | plugins_env(pget(plugins, Cfg))],
-          Server ++ "/scripts/rabbitmq-server"),
-    execute({Server ++ "/scripts/rabbitmqctl -n ~s wait ~s",
-             [Nodename, PidFile]}),
-    Node = rabbit_nodes:make(Nodename),
+    Linked = execute_bg(Cfg, Server ++ "/scripts/rabbitmq-server"),
+    rabbitmqctl(Cfg, {"wait ~s", [pid_file(Cfg)]}),
     OSPid = rpc:call(Node, os, getpid, []),
     %% The cover system thinks all nodes with the same name are the
     %% same node and will automaticaly re-establish cover as soon as
@@ -125,11 +97,16 @@ start_node(Cfg) ->
         {true, false} -> cover:start([Node]);
         _             -> ok
     end,
-    [{node,       Node},
-     {pid_file,   PidFile}, 
-     {os_pid,     OSPid},
+    [{os_pid,     OSPid},
      {linked_pid, Linked} | Cfg].
 
+start_node_fail(Cfg0) ->
+    Node = rabbit_nodes:make(pget(nodename, Cfg0)),
+    Cfg = [{node, Node}, {acceptable_exit_codes, lists:seq(1, 255)} | Cfg0],
+    Server = pget(server, Cfg),
+    execute(Cfg, Server ++ "/scripts/rabbitmq-server"),
+    ok.
+
 build_cluster([First | Rest]) ->
     add_to_cluster([First], Rest).
 
@@ -139,14 +116,21 @@ add_to_cluster([First | _] = Existing, New) ->
 
 cluster_with(Cfg, NewCfg) ->
     Node = pget(node, Cfg),
-    NewNodename = pget(nodename, NewCfg),
+    rabbitmqctl(NewCfg, stop_app),
+    rabbitmqctl(NewCfg, {"join_cluster ~s", [Node]}),
+    rabbitmqctl(NewCfg, start_app).
+
+rabbitmqctl(Cfg, Str) ->
+    Node = pget(node, Cfg),
     Server = pget(server, Cfg),
-    execute({Server ++ "/scripts/rabbitmqctl -n ~s stop_app",
-             [NewNodename]}),
-    execute({Server ++ "/scripts/rabbitmqctl -n ~s join_cluster ~s",
-             [NewNodename, Node]}),
-    execute({Server ++ "/scripts/rabbitmqctl -n ~s start_app",
-             [NewNodename]}).   
+    Cmd = case Node of
+              undefined -> {"~s", [fmt(Str)]};
+              _         -> {"-n ~s ~s", [Node, fmt(Str)]}
+          end,
+    execute(Cfg, {Server ++ "/scripts/rabbitmqctl ~s", [fmt(Cmd)]}).
+
+rabbitmqctl_fail(Cfg, Str) ->
+    rabbitmqctl([{acceptable_exit_codes, lists:seq(1, 255)} | Cfg], Str).
 
 ha_policy_all([Cfg | _] = Cfgs) ->
     set_ha_policy(Cfg, <<".*">>, <<"all">>),
@@ -155,9 +139,11 @@ ha_policy_all([Cfg | _] = Cfgs) ->
 ha_policy_two_pos([Cfg | _] = Cfgs) ->
     Members = [a2b(pget(node, C)) || C <- Cfgs],
     TwoNodes = [M || M <- lists:sublist(Members, 2)],
-    set_ha_policy(Cfg, <<"^ha.two.">>, {<<"nodes">>, TwoNodes}, []),
+    set_ha_policy(Cfg, <<"^ha.two.">>, {<<"nodes">>, TwoNodes},
+                  [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
     set_ha_policy(Cfg, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes},
-                  [{<<"ha-sync-mode">>, <<"automatic">>}]),
+                  [{<<"ha-sync-mode">>,           <<"automatic">>},
+                   {<<"ha-promote-on-shutdown">>, <<"always">>}]),
     Cfgs.
 
 start_connections(Nodes) -> [start_connection(Node) || Node <- Nodes].
@@ -171,17 +157,24 @@ start_connection(Cfg) ->
 stop_nodes(Nodes) -> [stop_node(Node) || Node <- Nodes].
 
 stop_node(Cfg) ->
-    Server = pget(server, Cfg),
     maybe_flush_cover(Cfg),
-    catch execute({Server ++ "/scripts/rabbitmqctl -n ~s stop ~s",
-                   [pget(nodename, Cfg), pget(pid_file, Cfg)]}),
+    catch rabbitmqctl(Cfg, {"stop ~s", [pid_file(Cfg)]}),
     strip_running(Cfg).
 
 kill_node(Cfg) ->
     maybe_flush_cover(Cfg),
-    catch execute({"kill -9 ~s", [pget(os_pid, Cfg)]}),
+    OSPid = pget(os_pid, Cfg),
+    catch execute(Cfg, {"kill -9 ~s", [OSPid]}),
+    await_os_pid_death(OSPid),
     strip_running(Cfg).
 
+await_os_pid_death(OSPid) ->
+    case rabbit_misc:is_os_process_alive(OSPid) of
+        true  -> timer:sleep(100),
+                 await_os_pid_death(OSPid);
+        false -> ok
+    end.
+
 restart_node(Cfg) ->
     start_node(stop_node(Cfg)).
 
@@ -201,34 +194,86 @@ cover_work_factor(Without, Cfg) ->
 
 %%----------------------------------------------------------------------------
 
-execute(Cmd) -> execute([], Cmd).
+execute(Cmd) ->
+    execute([], Cmd, [0]).
+
+execute(Cfg, Cmd) ->
+    %% code 137 -> killed with SIGKILL which we do in some tests
+    execute(environment(Cfg), Cmd, pget(acceptable_exit_codes, Cfg, [0, 137])).
 
-execute(Env0, Cmd0) ->
-    Env = [{K, fmt(V)} || {K, V} <- Env0],
+execute(Env0, Cmd0, AcceptableExitCodes) ->
+    Env = [{"RABBITMQ_" ++ K, fmt(V)} || {K, V} <- Env0],
     Cmd = fmt(Cmd0),
+    error_logger:info_msg("Invoking '~s'~n", [Cmd]),
     Port = erlang:open_port(
              {spawn, "/usr/bin/env sh -c \"" ++ Cmd ++ "\""},
              [{env, Env}, exit_status,
               stderr_to_stdout, use_stdio]),
-    port_receive_loop(Port, "").
+    port_receive_loop(Port, "", AcceptableExitCodes).
 
-port_receive_loop(Port, Stdout) ->
-    receive
-        {Port, {exit_status, 0}}   -> Stdout;
-        {Port, {exit_status, 137}} -> Stdout; %% [0]
-        {Port, {exit_status, X}}   -> exit({exit_status, X, Stdout});
-        {Port, {data, Out}}        -> %%io:format(user, "~s", [Out]),
-                                      port_receive_loop(Port, Stdout ++ Out)
+environment(Cfg) ->
+    Nodename = pget(nodename, Cfg),
+    Plugins = pget(plugins, Cfg),
+    case Nodename of
+        undefined ->
+            plugins_env(Plugins);
+        _         ->
+            Port = pget(port, Cfg),
+            Base = pget(base, Cfg),
+            Server = pget(server, Cfg),
+            [{"MNESIA_DIR",         {"~s/~s", [Base, pget(mnesia_dir, Cfg)]}},
+             {"PLUGINS_EXPAND_DIR", {"~s/~s-plugins-expand", [Base, Nodename]}},
+             {"LOG_BASE",           {"~s", [Base]}},
+             {"NODENAME",           {"~s", [Nodename]}},
+             {"NODE_PORT",          {"~B", [Port]}},
+             {"PID_FILE",           pid_file(Cfg)},
+             {"CONFIG_FILE",        "/some/path/which/does/not/exist"},
+             {"ALLOW_INPUT",        "1"}, %% Needed to make it close on exit
+             %% Bit of a hack - only needed for mgmt tests.
+             {"SERVER_START_ARGS",
+              {"-rabbitmq_management listener [{port,1~B}]", [Port]}},
+             {"SERVER_ERL_ARGS",
+              %% Next two lines are defaults
+              {"+K true +A30 +P 1048576 "
+               "-kernel inet_default_connect_options [{nodelay,true}] "
+               %% Some tests need to be able to make distribution unhappy
+               "-pa ~s/../rabbitmq-test/ebin "
+               "-proto_dist inet_proxy", [Server]}}
+             | plugins_env(Plugins)]
     end.
 
-%% [0] code 137 -> killed with SIGKILL which we do in some tests
+plugins_env(none) ->
+    [{"ENABLED_PLUGINS_FILE", "/does-not-exist"}];
+plugins_env(Dir) ->
+    [{"PLUGINS_DIR",          {"~s/plugins", [Dir]}},
+     {"PLUGINS_EXPAND_DIR",   {"~s/expand", [Dir]}},
+     {"ENABLED_PLUGINS_FILE", {"~s/enabled_plugins", [Dir]}}].
+
+pid_file(Cfg) ->
+    rabbit_misc:format("~s/~s.pid", [pget(base, Cfg), pget(nodename, Cfg)]).
+
+port_receive_loop(Port, Stdout, AcceptableExitCodes) ->
+    receive
+        {Port, {exit_status, X}} ->
+            Fmt = "Command exited with code ~p~nStdout: ~s~n",
+            Args = [X, Stdout],
+            case lists:member(X, AcceptableExitCodes) of
+                true  -> error_logger:info_msg(Fmt, Args),
+                         Stdout;
+                false -> error_logger:error_msg(Fmt, Args),
+                         exit({exit_status, X, AcceptableExitCodes, Stdout})
+            end;
+        {Port, {data, Out}} ->
+            port_receive_loop(Port, Stdout ++ Out, AcceptableExitCodes)
+    end.
 
-execute_bg(Env, Cmd) ->
+execute_bg(Cfg, Cmd) ->
     spawn_link(fun () ->
-                       execute(Env, Cmd),
+                       execute(Cfg, Cmd),
                        {links, Links} = process_info(self(), links),
                        [unlink(L) || L <- Links]
                end).
 
 fmt({Fmt, Args}) -> rabbit_misc:format(Fmt, Args);
 fmt(Str)         -> Str.
+
index 7193704b312fc94c354ff2fe06dff82c0cedf243..d0df292db6647b093cbae30f67971039a800bcdb 100644 (file)
@@ -25,6 +25,7 @@
 -export([run_in_broker/2, run_multi/5]).
 
 run_in_broker(Dir, Filter) ->
+    add_server_test_ebin_dir(),
     io:format("~nIn-broker tests~n================~n~n", []),
     eunit:test(make_tests_single(Dir, Filter, ?TIMEOUT), []).
 
@@ -124,8 +125,15 @@ make_test_multi(M, FWith, F, ShowHeading, Timeout, Width, InitialCfg) ->
                fun () ->
                        [link(pget(linked_pid, N)) || N <- Nodes],
                        io:format(user, " [running]", []),
-                       M:F(Nodes),
-                       io:format(user, " [PASSED]", [])
+                       %%try
+                           M:F(Nodes),
+                           io:format(user, " [PASSED]", [])
+                       %% catch
+                       %%     Type:Reason ->
+                       %%         io:format(user, "YYY stop~n", []),
+                       %%         rabbit_test_configs:stop_nodes(Nodes),
+                       %%         exit({Type, Reason, erlang:get_stacktrace()})
+                       %% end
                end}]
      end}.
 %% [0] If we didn't get as far as starting any nodes then we only have
@@ -212,3 +220,11 @@ error_logger_logfile_filename() ->
        {error,_} -> {error, no_log_file};
        Val       -> Val
     end.
+
+add_server_test_ebin_dir() ->
+    %% Some tests need modules from this dir, but it's not on the path
+    %% by default.
+    {file, Path} = code:is_loaded(rabbit),
+    Ebin = filename:dirname(Path),
+    TestEbin = filename:join([Ebin, "..", "test", "ebin"]),
+    code:add_path(TestEbin).
index c8b0f9a2e5c2f8082b7486a84921344feaa981bc..973e1b0871ed36aca09e80d3c31bcb4bfb52c2be 100644 (file)
@@ -45,6 +45,12 @@ clear_param(Cfg, Component, Name) ->
     ok = rpc:call(pget(node, Cfg), rabbit_runtime_parameters, clear,
                  [<<"/">>, Component, Name]).
 
+enable_plugin(Cfg, Plugin) ->
+    plugins_action(enable, Cfg, [Plugin], []).
+
+disable_plugin(Cfg, Plugin) ->
+    plugins_action(disable, Cfg, [Plugin], []).
+
 control_action(Command, Cfg) ->
     control_action(Command, Cfg, [], []).
 
@@ -59,6 +65,13 @@ control_action(Command, Cfg, Args, Opts) ->
                       error_logger:info_msg(F ++ "~n", A)
               end]).
 
+plugins_action(Command, Cfg, Args, Opts) ->
+    PluginsFile = os:getenv("RABBITMQ_ENABLED_PLUGINS_FILE"),
+    PluginsDir = os:getenv("RABBITMQ_PLUGINS_DIR"),
+    Node = pget(node, Cfg),
+    rpc:call(Node, rabbit_plugins_main, action,
+             [Command, Node, Args, Opts, PluginsFile, PluginsDir]).
+
 restart_app(Cfg) ->
     stop_app(Cfg),
     start_app(Cfg).
diff --git a/rabbitmq-server/plugins-src/rabbitmq-test/test/src/cluster_rename.erl b/rabbitmq-server/plugins-src/rabbitmq-test/test/src/cluster_rename.erl
new file mode 100644 (file)
index 0000000..258c0dc
--- /dev/null
@@ -0,0 +1,194 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%
+-module(cluster_rename).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+-define(CLUSTER2,
+        fun(C) -> rabbit_test_configs:cluster(C, [bugs, bigwig]) end).
+
+-define(CLUSTER3,
+        fun(C) -> rabbit_test_configs:cluster(C, [bugs, bigwig, peter]) end).
+
+%% Rolling rename of a cluster, each node should do a secondary rename.
+rename_cluster_one_by_one_with() -> ?CLUSTER3.
+rename_cluster_one_by_one([Bugs, Bigwig, Peter]) ->
+    publish_all([{Bugs, <<"1">>}, {Bigwig, <<"2">>}, {Peter, <<"3">>}]),
+
+    Jessica = stop_rename_start(Bugs,   jessica, [bugs, jessica]),
+    Hazel   = stop_rename_start(Bigwig, hazel,   [bigwig, hazel]),
+    Flopsy  = stop_rename_start(Peter,  flopsy,  [peter, flopsy]),
+
+    consume_all([{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+    stop_all([Jessica, Hazel, Flopsy]),
+    ok.
+
+%% Big bang rename of a cluster, bugs should do a primary rename.
+rename_cluster_big_bang_with() -> ?CLUSTER3.
+rename_cluster_big_bang([Bugs, Bigwig, Peter]) ->
+    publish_all([{Bugs, <<"1">>}, {Bigwig, <<"2">>}, {Peter, <<"3">>}]),
+
+    Peter1  = rabbit_test_configs:stop_node(Peter),
+    Bigwig1 = rabbit_test_configs:stop_node(Bigwig),
+    Bugs1   = rabbit_test_configs:stop_node(Bugs),
+
+    Map = [bugs, jessica, bigwig, hazel, peter, flopsy],
+    Jessica0 = rename_node(Bugs1,   jessica, Map),
+    Hazel0   = rename_node(Bigwig1, hazel,   Map),
+    Flopsy0  = rename_node(Peter1,  flopsy,  Map),
+
+    Jessica = rabbit_test_configs:start_node(Jessica0),
+    Hazel   = rabbit_test_configs:start_node(Hazel0),
+    Flopsy  = rabbit_test_configs:start_node(Flopsy0),
+
+    consume_all([{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+    stop_all([Jessica, Hazel, Flopsy]),
+    ok.
+
+%% Here we test that bugs copes with things being renamed around it.
+partial_one_by_one_with() -> ?CLUSTER3.
+partial_one_by_one([Bugs, Bigwig, Peter]) ->
+    publish_all([{Bugs, <<"1">>}, {Bigwig, <<"2">>}, {Peter, <<"3">>}]),
+
+    Jessica = stop_rename_start(Bugs,   jessica, [bugs, jessica]),
+    Hazel   = stop_rename_start(Bigwig, hazel,   [bigwig, hazel]),
+
+    consume_all([{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Peter, <<"3">>}]),
+    stop_all([Jessica, Hazel, Peter]),
+    ok.
+
+%% Here we test that bugs copes with things being renamed around it.
+partial_big_bang_with() -> ?CLUSTER3.
+partial_big_bang([Bugs, Bigwig, Peter]) ->
+    publish_all([{Bugs, <<"1">>}, {Bigwig, <<"2">>}, {Peter, <<"3">>}]),
+
+    Peter1  = rabbit_test_configs:stop_node(Peter),
+    Bigwig1 = rabbit_test_configs:stop_node(Bigwig),
+    Bugs1   = rabbit_test_configs:stop_node(Bugs),
+
+    Map = [bigwig, hazel, peter, flopsy],
+    Hazel0   = rename_node(Bigwig1, hazel,   Map),
+    Flopsy0  = rename_node(Peter1,  flopsy,  Map),
+
+    Bugs2  = rabbit_test_configs:start_node(Bugs1),
+    Hazel  = rabbit_test_configs:start_node(Hazel0),
+    Flopsy = rabbit_test_configs:start_node(Flopsy0),
+
+    consume_all([{Bugs2, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+    stop_all([Bugs2, Hazel, Flopsy]),
+    ok.
+
+%% We should be able to specify the -n parameter on ctl with either
+%% the before or after name for the local node (since in real cases
+%% one might want to invoke the command before or after the hostname
+%% has changed) - usually we test before so here we test after.
+post_change_nodename_with() -> ?CLUSTER2.
+post_change_nodename([Bugs, _Bigwig]) ->
+    publish(Bugs, <<"bugs">>),
+
+    Bugs1    = rabbit_test_configs:stop_node(Bugs),
+    Bugs2    = [{nodename, jessica} | proplists:delete(nodename, Bugs1)],
+    Jessica0 = rename_node(Bugs2, jessica, [bugs, jessica]),
+    Jessica  = rabbit_test_configs:start_node(Jessica0),
+
+    consume(Jessica, <<"bugs">>),
+    stop_all([Jessica]),
+    ok.
+
+%% If we invoke rename but the node name does not actually change, we
+%% should roll back.
+abortive_rename_with() -> ?CLUSTER2.
+abortive_rename([Bugs, _Bigwig]) ->
+    publish(Bugs,  <<"bugs">>),
+
+    Bugs1   = rabbit_test_configs:stop_node(Bugs),
+    _Jessica = rename_node(Bugs1, jessica, [bugs, jessica]),
+    Bugs2 = rabbit_test_configs:start_node(Bugs1),
+
+    consume(Bugs2, <<"bugs">>),
+    ok.
+
+%% And test some ways the command can fail.
+rename_fail_with() -> ?CLUSTER2.
+rename_fail([Bugs, _Bigwig]) ->
+    Bugs1 = rabbit_test_configs:stop_node(Bugs),
+    %% Rename from a node that does not exist
+    rename_node_fail(Bugs1, [bugzilla, jessica]),
+    %% Rename to a node which does
+    rename_node_fail(Bugs1, [bugs, bigwig]),
+    %% Rename two nodes to the same thing
+    rename_node_fail(Bugs1, [bugs, jessica, bigwig, jessica]),
+    %% Rename while impersonating a node not in the cluster
+    rename_node_fail(set_node(rabbit, Bugs1), [bugs, jessica]),
+    ok.
+
+rename_twice_fail_with() -> ?CLUSTER2.
+rename_twice_fail([Bugs, _Bigwig]) ->
+    Bugs1 = rabbit_test_configs:stop_node(Bugs),
+    Indecisive = rename_node(Bugs1, indecisive, [bugs, indecisive]),
+    rename_node_fail(Indecisive, [indecisive, jessica]),
+    ok.
+
+%% ----------------------------------------------------------------------------
+
+%% Normal post-test stop does not work since names have changed...
+stop_all(Cfgs) ->
+     [rabbit_test_configs:stop_node(Cfg) || Cfg <- Cfgs].
+
+stop_rename_start(Cfg, Nodename, Map) ->
+    rabbit_test_configs:start_node(
+      rename_node(rabbit_test_configs:stop_node(Cfg), Nodename, Map)).
+
+rename_node(Cfg, Nodename, Map) ->
+    rename_node(Cfg, Nodename, Map, fun rabbit_test_configs:rabbitmqctl/2).
+
+rename_node_fail(Cfg, Map) ->
+    rename_node(Cfg, ignored, Map, fun rabbit_test_configs:rabbitmqctl_fail/2).
+
+rename_node(Cfg, Nodename, Map, Ctl) ->
+    MapS = string:join(
+             [atom_to_list(rabbit_nodes:make(N)) || N <- Map], " "),
+    Ctl(Cfg, {"rename_cluster_node ~s", [MapS]}),
+    set_node(Nodename, Cfg).
+
+publish(Cfg, Q) ->
+    Ch = pget(channel, Cfg),
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props   = #'P_basic'{delivery_mode = 2},
+                                payload = Q}),
+    amqp_channel:wait_for_confirms(Ch).
+
+consume(Cfg, Q) ->
+    {_Conn, Ch} = rabbit_test_util:connect(Cfg),
+    amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+    {#'basic.get_ok'{}, #amqp_msg{payload = Q}} =
+        amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+
+publish_all(CfgsKeys) ->
+    [publish(Cfg, Key) || {Cfg, Key} <- CfgsKeys].
+
+consume_all(CfgsKeys) ->
+    [consume(Cfg, Key) || {Cfg, Key} <- CfgsKeys].
+
+set_node(Nodename, Cfg) ->
+    [{nodename, Nodename} | proplists:delete(nodename, Cfg)].
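For illustration, a sketch of the ctl command that rename_node/4 above assembles for a two-entry Map; the host suffix added by rabbit_nodes:make/1 is hypothetical:

    %% rename_node(Cfg, jessica, [bugs, jessica]) runs, roughly:
    %%   rabbitmqctl rename_cluster_node bugs@HOST jessica@HOST
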
index 43eb06bb9285f6cf08990040dbb1a3a663e9e34e..f8b6cfd1635a698cdf565f87480f15513839aaf0 100644 (file)
@@ -106,7 +106,7 @@ join_to_start_interval(Config) ->
     assert_clustered([Rabbit, Hare]).
 
 forget_cluster_node_with() -> start_abc.
-forget_cluster_node(Config) ->
+forget_cluster_node([_, HareCfg, _] = Config) ->
     [Rabbit, Hare, Bunny] = cluster_members(Config),
 
     %% Trying to remove a node not in the cluster should fail
@@ -145,9 +145,13 @@ forget_cluster_node(Config) ->
     ok = stop_app(Bunny),
     %% This is fine but we need the flag
     assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end),
-    %% Hare was not the second-to-last to go down
-    ok = forget_cluster_node(Hare, Bunny, true),
-    ok = start_app(Hare),
+    %% Also fails because hare node is still running
+    assert_failure(fun () -> forget_cluster_node(Hare, Bunny, true) end),
+    %% But this works
+    HareCfg2 = rabbit_test_configs:stop_node(HareCfg),
+    rabbit_test_configs:rabbitmqctl(
+      HareCfg2, {"forget_cluster_node --offline ~s", [Bunny]}),
+    _HareCfg3 = rabbit_test_configs:start_node(HareCfg2),
     ok = start_app(Rabbit),
     %% Bunny still thinks it's clustered with Rabbit and Hare
     assert_failure(fun () -> start_app(Bunny) end),
@@ -156,28 +160,137 @@ forget_cluster_node(Config) ->
     assert_not_clustered(Bunny),
     assert_clustered([Rabbit, Hare]).
 
-forget_cluster_node_removes_things_with() -> start_abc.
-forget_cluster_node_removes_things([RabbitCfg, HareCfg, _BunnyCfg] = Config) ->
-    [Rabbit, Hare, _Bunny] = cluster_members(Config),
-    stop_join_start(Rabbit, Hare),
-    {_RConn, RCh} = rabbit_test_util:connect(RabbitCfg),
-    #'queue.declare_ok'{} =
-        amqp_channel:call(RCh, #'queue.declare'{queue   = <<"test">>,
-                                                durable = true}),
+forget_removes_things_with() -> cluster_ab.
+forget_removes_things(Cfg) ->
+    test_removes_things(Cfg, fun (R, H) -> ok = forget_cluster_node(H, R) end).
+
+reset_removes_things_with() -> cluster_ab.
+reset_removes_things(Cfg) ->
+    test_removes_things(Cfg, fun (R, _H) -> ok = reset(R) end).
 
+test_removes_things([RabbitCfg, HareCfg] = Config, LoseRabbit) ->
+    Unmirrored = <<"unmirrored-queue">>,
+    [Rabbit, Hare] = cluster_members(Config),
+    RCh = pget(channel, RabbitCfg),
+    declare(RCh, Unmirrored),
     ok = stop_app(Rabbit),
 
     {_HConn, HCh} = rabbit_test_util:connect(HareCfg),
     {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
-        (catch amqp_channel:call(HCh, #'queue.declare'{queue   = <<"test">>,
-                                                       durable = true})),
-
-    ok = forget_cluster_node(Hare, Rabbit),
+        (catch declare(HCh, Unmirrored)),
 
+    ok = LoseRabbit(Rabbit, Hare),
     {_HConn2, HCh2} = rabbit_test_util:connect(HareCfg),
-    #'queue.declare_ok'{} =
-        amqp_channel:call(HCh2, #'queue.declare'{queue   = <<"test">>,
-                                                 durable = true}),
+    declare(HCh2, Unmirrored),
+    ok.
+
+forget_offline_removes_things_with() -> cluster_ab.
+forget_offline_removes_things([Rabbit, Hare]) ->
+    Unmirrored = <<"unmirrored-queue">>,
+    X = <<"X">>,
+    RCh = pget(channel, Rabbit),
+    declare(RCh, Unmirrored),
+
+    amqp_channel:call(RCh, #'exchange.declare'{durable     = true,
+                                               exchange    = X,
+                                               auto_delete = true}),
+    amqp_channel:call(RCh, #'queue.bind'{queue    = Unmirrored,
+                                         exchange = X}),
+    ok = stop_app(pget(node, Rabbit)),
+
+    {_HConn, HCh} = rabbit_test_util:connect(Hare),
+    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+        (catch declare(HCh, Unmirrored)),
+
+    Hare2 = rabbit_test_configs:stop_node(Hare),
+    _Rabbit2 = rabbit_test_configs:stop_node(Rabbit),
+    rabbit_test_configs:rabbitmqctl(
+      Hare2, {"forget_cluster_node --offline ~s", [pget(node, Rabbit)]}),
+    Hare3 = rabbit_test_configs:start_node(Hare2),
+
+    {_HConn2, HCh2} = rabbit_test_util:connect(Hare3),
+    declare(HCh2, Unmirrored),
+    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+        (catch amqp_channel:call(HCh2,#'exchange.declare'{durable     = true,
+                                                          exchange    = X,
+                                                          auto_delete = true,
+                                                          passive     = true})),
+    ok.
+
+forget_promotes_offline_slave_with() ->
+    fun (Cfgs) ->
+            rabbit_test_configs:cluster(Cfgs, [a, b, c, d])
+    end.
+
+forget_promotes_offline_slave([A, B, C, D]) ->
+    ACh = pget(channel, A),
+    ANode = pget(node, A),
+    Q = <<"mirrored-queue">>,
+    declare(ACh, Q),
+    set_ha_policy(Q, A, [B, C]),
+    set_ha_policy(Q, A, [C, D]), %% Test add and remove from recoverable_slaves
+
+    %% Publish and confirm
+    amqp_channel:call(ACh, #'confirm.select'{}),
+    amqp_channel:cast(ACh, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+    amqp_channel:wait_for_confirms(ACh),
+
+    %% We kill nodes rather than stop them in order to make sure
+    %% that we aren't dependent on anything that happens as they shut
+    %% down (see bug 26467).
+    D2 = rabbit_test_configs:kill_node(D),
+    C2 = rabbit_test_configs:kill_node(C),
+    _B2 = rabbit_test_configs:kill_node(B),
+    _A2 = rabbit_test_configs:kill_node(A),
+
+    rabbit_test_configs:rabbitmqctl(C2, "force_boot"),
+
+    C3 = rabbit_test_configs:start_node(C2),
+
+    %% We should now have the following dramatis personae:
+    %% A - down, master
+    %% B - down, used to be slave, no longer is, never had the message
+    %% C - running, should be slave, but has wiped the message on restart
+    %% D - down, recoverable slave, contains message
+    %%
+    %% So forgetting A should offline-promote the queue to D, keeping
+    %% the message.
+
+    rabbit_test_configs:rabbitmqctl(C3, {"forget_cluster_node ~s", [ANode]}),
+
+    D3 = rabbit_test_configs:start_node(D2),
+    {_DConn2, DCh2} = rabbit_test_util:connect(D3),
+    #'queue.declare_ok'{message_count = 1} = declare(DCh2, Q),
+    ok.
+
+set_ha_policy(Q, MasterCfg, SlaveCfgs) ->
+    Nodes = [list_to_binary(atom_to_list(pget(node, N))) ||
+                N <- [MasterCfg | SlaveCfgs]],
+    rabbit_test_util:set_ha_policy(MasterCfg, Q, {<<"nodes">>, Nodes}),
+    await_slaves(Q, pget(node, MasterCfg), [pget(node, C) || C <- SlaveCfgs]).
+
+await_slaves(Q, MNode, SNodes) ->
+    {ok, #amqqueue{pid        = MPid,
+                   slave_pids = SPids}} =
+        rpc:call(MNode, rabbit_amqqueue, lookup,
+                 [rabbit_misc:r(<<"/">>, queue, Q)]),
+    ActMNode = node(MPid),
+    ActSNodes = lists:usort([node(P) || P <- SPids]),
+    case {MNode, lists:usort(SNodes)} of
+        {ActMNode, ActSNodes} -> ok;
+        _                     -> timer:sleep(100),
+                                 await_slaves(Q, MNode, SNodes)
+    end.
+
+force_boot_with() -> cluster_ab.
+force_boot([Rabbit, Hare]) ->
+    rabbit_test_configs:rabbitmqctl_fail(Rabbit, force_boot),
+    Rabbit2 = rabbit_test_configs:stop_node(Rabbit),
+    _Hare2 = rabbit_test_configs:stop_node(Hare),
+    rabbit_test_configs:start_node_fail(Rabbit2),
+    rabbit_test_configs:rabbitmqctl(Rabbit2, force_boot),
+    _Rabbit3 = rabbit_test_configs:start_node(Rabbit2),
     ok.
 
 change_cluster_node_type_with() -> start_abc.
@@ -260,8 +373,8 @@ change_cluster_when_node_offline(Config) ->
                           [Rabbit, Hare]),
     assert_not_clustered(Bunny).
 
-update_cluster_nodes_test_with() -> start_abc.
-update_cluster_nodes_test(Config) ->
+update_cluster_nodes_with() -> start_abc.
+update_cluster_nodes(Config) ->
     [Rabbit, Hare, Bunny] = cluster_members(Config),
 
     %% Mnesia is running...
@@ -283,9 +396,9 @@ update_cluster_nodes_test(Config) ->
     assert_not_clustered(Hare),
     assert_clustered([Rabbit, Bunny]).
 
-erlang_config_with() -> start_abc.
+erlang_config_with() -> start_ab.
 erlang_config(Config) ->
-    [Rabbit, Hare, _Bunny] = cluster_members(Config),
+    [Rabbit, Hare] = cluster_members(Config),
 
     ok = stop_app(Hare),
     ok = reset(Hare),
@@ -302,6 +415,17 @@ erlang_config(Config) ->
     assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
                           [Rabbit, Hare]),
 
+    %% Check having a stop_app'ed node around doesn't break completely.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = stop_app(Rabbit),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], disc}]),
+    ok = start_app(Hare),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Hare),
+    assert_not_clustered(Rabbit),
+
     %% We get a warning but we start anyway
     ok = stop_app(Hare),
     ok = reset(Hare),
@@ -311,17 +435,47 @@ erlang_config(Config) ->
     assert_not_clustered(Hare),
     assert_not_clustered(Rabbit),
 
-    %% If we use a legacy config file, it still works (and a warning is emitted)
+    %% If we use a legacy config file, the node fails to start.
     ok = stop_app(Hare),
     ok = reset(Hare),
     ok = rpc:call(Hare, application, set_env,
                   [rabbit, cluster_nodes, [Rabbit]]),
-    ok = start_app(Hare),
-    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
-                          [Rabbit, Hare]).
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
 
-force_reset_test_with() -> start_abc.
-force_reset_test(Config) ->
+    %% If we use an invalid node name, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {["Mike's computer"], disc}]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid node type, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], blue}]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid cluster_nodes conf, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, true]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, "Yes, please"]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit).
+
+force_reset_node_with() -> start_abc.
+force_reset_node(Config) ->
     [Rabbit, Hare, _Bunny] = cluster_members(Config),
 
     stop_join_start(Rabbit, Hare),
@@ -445,3 +599,10 @@ control_action(Command, Node, Args, Opts) ->
     rpc:call(Node, rabbit_control_main, action,
              [Command, Node, Args, Opts,
               fun io:format/2]).
+
+declare(Ch, Name) ->
+    Res = amqp_channel:call(Ch, #'queue.declare'{durable = true,
+                                                 queue   = Name}),
+    amqp_channel:call(Ch, #'queue.bind'{queue    = Name,
+                                        exchange = <<"amq.fanout">>}),
+    Res.
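For contrast with the misconfigurations exercised in erlang_config/1 above, a valid cluster_nodes value is a {Nodes, NodeType} tuple; in a classic rabbitmq.config it would look roughly like this sketch (node names hypothetical):

    [{rabbit, [{cluster_nodes, {['rabbit@node1', 'rabbit@node2'], disc}}]}].
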
diff --git a/rabbitmq-server/plugins-src/rabbitmq-test/test/src/crashing_queues.erl b/rabbitmq-server/plugins-src/rabbitmq-test/test/src/crashing_queues.erl
new file mode 100644 (file)
index 0000000..e34fd04
--- /dev/null
@@ -0,0 +1,213 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%
+-module(crashing_queues).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_test_util, [set_ha_policy/3, a2b/1]).
+-import(rabbit_misc, [pget/2]).
+
+crashing_unmirrored_with() -> [cluster_ab].
+crashing_unmirrored([CfgA, CfgB]) ->
+    A = pget(node, CfgA),
+    ChA = pget(channel, CfgA),
+    ConnB = pget(connection, CfgB),
+    amqp_channel:call(ChA, #'confirm.select'{}),
+    test_queue_failure(A, ChA, ConnB, 1, 0,
+                       #'queue.declare'{queue = <<"test">>, durable = true}),
+    test_queue_failure(A, ChA, ConnB, 0, 0,
+                       #'queue.declare'{queue = <<"test">>, durable = false}),
+    ok.
+
+crashing_mirrored_with() -> [cluster_ab, ha_policy_all].
+crashing_mirrored([CfgA, CfgB]) ->
+    A = pget(node, CfgA),
+    ChA = pget(channel, CfgA),
+    ConnB = pget(connection, CfgB),
+    amqp_channel:call(ChA, #'confirm.select'{}),
+    test_queue_failure(A, ChA, ConnB, 2, 1,
+                       #'queue.declare'{queue = <<"test">>, durable = true}),
+    test_queue_failure(A, ChA, ConnB, 2, 1,
+                       #'queue.declare'{queue = <<"test">>, durable = false}),
+    ok.
+
+test_queue_failure(Node, Ch, RaceConn, MsgCount, SlaveCount, Decl) ->
+    #'queue.declare_ok'{queue = QName} = amqp_channel:call(Ch, Decl),
+    publish(Ch, QName, transient),
+    publish(Ch, QName, durable),
+    Racer = spawn_declare_racer(RaceConn, Decl),
+    kill_queue(Node, QName),
+    assert_message_count(MsgCount, Ch, QName),
+    assert_slave_count(SlaveCount, Node, QName),
+    stop_declare_racer(Racer),
+    amqp_channel:call(Ch, #'queue.delete'{queue = QName}).
+
+give_up_after_repeated_crashes_with() -> [cluster_ab].
+give_up_after_repeated_crashes([CfgA, CfgB]) ->
+    A = pget(node, CfgA),
+    ChA = pget(channel, CfgA),
+    ChB = pget(channel, CfgB),
+    QName = <<"test">>,
+    amqp_channel:call(ChA, #'confirm.select'{}),
+    amqp_channel:call(ChA, #'queue.declare'{queue   = QName,
+                                            durable = true}),
+    await_state(A, QName, running),
+    publish(ChA, QName, durable),
+    kill_queue_hard(A, QName),
+    {'EXIT', _} = (catch amqp_channel:call(
+                           ChA, #'queue.declare'{queue   = QName,
+                                                 durable = true})),
+    await_state(A, QName, crashed),
+    amqp_channel:call(ChB, #'queue.delete'{queue = QName}),
+    amqp_channel:call(ChB, #'queue.declare'{queue   = QName,
+                                            durable = true}),
+    await_state(A, QName, running),
+
+    %% Since it's convenient, also test absent queue status here.
+    rabbit_test_configs:stop_node(CfgB),
+    await_state(A, QName, down),
+    ok.
+
+
+publish(Ch, QName, DelMode) ->
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = QName},
+    Msg = #amqp_msg{props = #'P_basic'{delivery_mode = del_mode(DelMode)}},
+    amqp_channel:cast(Ch, Publish, Msg),
+    amqp_channel:wait_for_confirms(Ch).
+
+del_mode(transient) -> 1;
+del_mode(durable)   -> 2.
+
+spawn_declare_racer(Conn, Decl) ->
+    Self = self(),
+    spawn_link(fun() -> declare_racer_loop(Self, Conn, Decl) end).
+
+stop_declare_racer(Pid) ->
+    Pid ! stop,
+    MRef = erlang:monitor(process, Pid),
+    receive
+        {'DOWN', MRef, process, Pid, _} -> ok
+    end.
+
+declare_racer_loop(Parent, Conn, Decl) ->
+    receive
+        stop -> unlink(Parent)
+    after 0 ->
+            %% Catch here because we might happen to catch the queue
+            %% while it is in the middle of recovering and thus
+            %% explode with NOT_FOUND because it has crashed. Doesn't matter,
+            %% we are only in this loop to try to fool the recovery
+            %% code anyway.
+            try
+                case amqp_connection:open_channel(Conn) of
+                    {ok, Ch} -> amqp_channel:call(Ch, Decl);
+                    closing  -> ok
+                end
+            catch
+                exit:_ ->
+                    ok
+            end,
+            declare_racer_loop(Parent, Conn, Decl)
+    end.
+
+await_state(Node, QName, State) ->
+    await_state(Node, QName, State, 30000).
+
+await_state(Node, QName, State, Time) ->
+    case state(Node, QName) of
+        State ->
+            ok;
+        Other ->
+            case Time of
+                0 -> exit({timeout_awaiting_state, State, Other});
+                _ -> timer:sleep(100),
+                     await_state(Node, QName, State, Time - 100)
+            end
+    end.
+
+state(Node, QName) ->
+    V = <<"/">>,
+    Res = rabbit_misc:r(V, queue, QName),
+    [[{name,  Res},
+      {state, State}]] =
+        rpc:call(Node, rabbit_amqqueue, info_all, [V, [name, state]]),
+    State.
+
+kill_queue_hard(Node, QName) ->
+    case kill_queue(Node, QName) of
+        crashed -> ok;
+        _NewPid -> timer:sleep(100),
+                   kill_queue_hard(Node, QName)
+    end.
+
+kill_queue(Node, QName) ->
+    Pid1 = queue_pid(Node, QName),
+    exit(Pid1, boom),
+    await_new_pid(Node, QName, Pid1).
+
+queue_pid(Node, QName) ->
+    #amqqueue{pid   = QPid,
+              state = State} = lookup(Node, QName),
+    case State of
+        crashed -> case sup_child(Node, rabbit_amqqueue_sup_sup) of
+                       {ok, _}           -> QPid;   %% restarting
+                       {error, no_child} -> crashed %% given up
+                   end;
+        _       -> QPid
+    end.
+
+sup_child(Node, Sup) ->
+    case rpc:call(Node, supervisor2, which_children, [Sup]) of
+        [{_, Child, _, _}]              -> {ok, Child};
+        []                              -> {error, no_child};
+        {badrpc, {'EXIT', {noproc, _}}} -> {error, no_sup}
+    end.
+
+lookup(Node, QName) ->
+    {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+                       [rabbit_misc:r(<<"/">>, queue, QName)]),
+    Q.
+
+await_new_pid(Node, QName, OldPid) ->
+    case queue_pid(Node, QName) of
+        OldPid -> timer:sleep(10),
+                  await_new_pid(Node, QName, OldPid);
+        New    -> New
+    end.
+
+assert_message_count(Count, Ch, QName) ->
+    #'queue.declare_ok'{message_count = Count} =
+        amqp_channel:call(Ch, #'queue.declare'{queue   = QName,
+                                               passive = true}).
+
+assert_slave_count(Count, Node, QName) ->
+    Q = lookup(Node, QName),
+    [{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]),
+    RealCount = case Pids of
+                    '' -> 0;
+                    _  -> length(Pids)
+                end,
+    case RealCount of
+        Count ->
+            ok;
+        _ when RealCount < Count ->
+            timer:sleep(10),
+            assert_slave_count(Count, Node, QName);
+        _ ->
+            exit({too_many_slaves, Count, RealCount})
+    end.
index 8be3eebca0a51c1217f895ddf35604f04df5c8e7..e9acb520f52c4339a21e6acdf787a43d7b6d78f0 100644 (file)
@@ -87,12 +87,13 @@ change_cluster([CfgA, _CfgB, _CfgC] = CfgsABC) ->
     %% Add D and E, D joins in
     [CfgD, CfgE] = CfgsDE = rabbit_test_configs:start_nodes(CfgA, [d, e], 5675),
     D = pget(node, CfgD),
+    E = pget(node, CfgE),
     rabbit_test_configs:add_to_cluster(CfgsABC, CfgsDE),
     assert_slaves(A, ?QNAME, {A, [B, C, D]}),
 
-    %% Remove D, E does not join in
+    %% Remove D, E joins in
     rabbit_test_configs:stop_node(CfgD),
-    assert_slaves(A, ?QNAME, {A, [B, C]}),
+    assert_slaves(A, ?QNAME, {A, [B, C, E]}),
 
     %% Clean up since we started this by hand
     rabbit_test_configs:stop_node(CfgE),
@@ -101,13 +102,11 @@ change_cluster([CfgA, _CfgB, _CfgC] = CfgsABC) ->
 rapid_change_with() -> cluster_abc.
 rapid_change([CfgA, _CfgB, _CfgC]) ->
     ACh = pget(channel, CfgA),
-    Self = self(),
-    spawn_link(
-      fun() ->
-              [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)],
-              Self ! done
-      end),
-    rapid_loop(CfgA),
+    {_Pid, MRef} = spawn_monitor(
+                     fun() ->
+                             [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)]
+                     end),
+    rapid_loop(CfgA, MRef),
     ok.
 
 rapid_amqp_ops(Ch, I) ->
@@ -125,13 +124,16 @@ rapid_amqp_ops(Ch, I) ->
     end,
     amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}).
 
-rapid_loop(Cfg) ->
-    receive done ->
-            ok
+rapid_loop(Cfg, MRef) ->
+    receive
+        {'DOWN', MRef, process, _Pid, normal} ->
+            ok;
+        {'DOWN', MRef, process, _Pid, Reason} ->
+            exit({amqp_ops_died, Reason})
     after 0 ->
             set_ha_policy(Cfg, ?POLICY, <<"all">>),
             clear_policy(Cfg, ?POLICY),
-            rapid_loop(Cfg)
+            rapid_loop(Cfg, MRef)
     end.
 
 %% Vhost deletion needs to successfully tear down policies and queues
@@ -144,6 +146,38 @@ vhost_deletion([CfgA, _CfgB]) ->
     ok = rpc:call(Node, rabbit_vhost, delete, [<<"/">>]),
     ok.
 
+promote_on_shutdown_with() -> cluster_ab.
+promote_on_shutdown([CfgA, CfgB]) ->
+    set_ha_policy(CfgA, <<"^ha.promote">>, <<"all">>,
+                  [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
+    set_ha_policy(CfgA, <<"^ha.nopromote">>, <<"all">>),
+
+    ACh = pget(channel, CfgA),
+    [begin
+         amqp_channel:call(ACh, #'queue.declare'{queue   = Q,
+                                                 durable = true}),
+         publish(ACh, Q, 10)
+     end || Q <- [<<"ha.promote.test">>, <<"ha.nopromote.test">>]],
+    rabbit_test_configs:restart_node(CfgB),
+    CfgA1 = rabbit_test_configs:stop_node(CfgA),
+    {_, BCh} =  rabbit_test_util:connect(CfgB),
+    #'queue.declare_ok'{message_count = 0} = 
+        amqp_channel:call(
+          BCh, #'queue.declare'{queue   = <<"ha.promote.test">>,
+                                durable = true}),
+    ?assertExit(
+       {{shutdown, {server_initiated_close, 404, _}}, _},
+       amqp_channel:call(
+         BCh, #'queue.declare'{queue   = <<"ha.nopromote.test">>,
+                               durable = true})),
+    CfgA2 = rabbit_test_configs:start_node(CfgA1),
+    {_, ACh2} =  rabbit_test_util:connect(CfgA2),
+    #'queue.declare_ok'{message_count = 10} =
+        amqp_channel:call(
+          ACh2, #'queue.declare'{queue   = <<"ha.nopromote.test">>,
+                                 durable = true}),
+    ok.
+
 %%----------------------------------------------------------------------------
 
 assert_slaves(RPCNode, QName, Exp) ->
index 7ad6a07527ec0847d777313e7a4dc87260dea448..56b99ca7104e9c169d20c82310ab95cba2c8237d 100644 (file)
 ignore_with() -> ?CONFIG.
 ignore(Cfgs) ->
     [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
-    block_unblock([{B, C}]),
+    block_unblock([{A, B}, {A, C}]),
     timer:sleep(?DELAY),
-    [] = partitions(A),
-    [C] = partitions(B),
-    [B] = partitions(C),
+    [B, C] = partitions(A),
+    [A] = partitions(B),
+    [A] = partitions(C),
     ok.
 
-pause_on_down_with() -> ?CONFIG.
-pause_on_down([CfgA, CfgB, CfgC] = Cfgs) ->
+pause_minority_on_down_with() -> ?CONFIG.
+pause_minority_on_down([CfgA, CfgB, CfgC] = Cfgs) ->
     A = pget(node, CfgA),
     set_mode(Cfgs, pause_minority),
     true = is_running(A),
@@ -51,10 +51,34 @@ pause_on_down([CfgA, CfgB, CfgC] = Cfgs) ->
     await_running(A, false),
     ok.
 
-pause_on_blocked_with() -> ?CONFIG.
-pause_on_blocked(Cfgs) ->
+pause_minority_on_blocked_with() -> ?CONFIG.
+pause_minority_on_blocked(Cfgs) ->
     [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
     set_mode(Cfgs, pause_minority),
+    pause_on_blocked(A, B, C).
+
+pause_if_all_down_on_down_with() -> ?CONFIG.
+pause_if_all_down_on_down([_, CfgB, CfgC] = Cfgs) ->
+    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+    set_mode(Cfgs, {pause_if_all_down, [C], ignore}),
+    [(true = is_running(N)) || N <- [A, B, C]],
+
+    rabbit_test_util:kill(CfgB, sigkill),
+    timer:sleep(?DELAY),
+    [(true = is_running(N)) || N <- [A, C]],
+
+    rabbit_test_util:kill(CfgC, sigkill),
+    timer:sleep(?DELAY),
+    await_running(A, false),
+    ok.
+
+pause_if_all_down_on_blocked_with() -> ?CONFIG.
+pause_if_all_down_on_blocked(Cfgs) ->
+    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+    set_mode(Cfgs, {pause_if_all_down, [C], ignore}),
+    pause_on_blocked(A, B, C).
+
+pause_on_blocked(A, B, C) ->
     [(true = is_running(N)) || N <- [A, B, C]],
     block([{A, B}, {A, C}]),
     await_running(A, false),
@@ -77,23 +101,39 @@ pause_on_blocked(Cfgs) ->
 %% test to pass since there are a lot of things in the broker that can
 %% suddenly take several seconds to time out when TCP connections
 %% won't establish.
-pause_false_promises_mirrored_with() ->
+pause_minority_false_promises_mirrored_with() ->
     [start_ab, fun enable_dist_proxy/1,
      build_cluster, short_ticktime(10), start_connections, ha_policy_all].
 
-pause_false_promises_mirrored(Cfgs) ->
-    pause_false_promises(Cfgs).
+pause_minority_false_promises_mirrored(Cfgs) ->
+    pause_false_promises(Cfgs, pause_minority).
 
-pause_false_promises_unmirrored_with() ->
+pause_minority_false_promises_unmirrored_with() ->
     [start_ab, fun enable_dist_proxy/1,
      build_cluster, short_ticktime(10), start_connections].
 
-pause_false_promises_unmirrored(Cfgs) ->
-    pause_false_promises(Cfgs).
+pause_minority_false_promises_unmirrored(Cfgs) ->
+    pause_false_promises(Cfgs, pause_minority).
 
-pause_false_promises([CfgA, CfgB | _] = Cfgs) ->
+pause_if_all_down_false_promises_mirrored_with() ->
+    [start_ab, fun enable_dist_proxy/1,
+     build_cluster, short_ticktime(10), start_connections, ha_policy_all].
+
+pause_if_all_down_false_promises_mirrored([_, CfgB | _] = Cfgs) ->
+    B = pget(node, CfgB),
+    pause_false_promises(Cfgs, {pause_if_all_down, [B], ignore}).
+
+pause_if_all_down_false_promises_unmirrored_with() ->
+    [start_ab, fun enable_dist_proxy/1,
+     build_cluster, short_ticktime(10), start_connections].
+
+pause_if_all_down_false_promises_unmirrored([_, CfgB | _] = Cfgs) ->
+    B = pget(node, CfgB),
+    pause_false_promises(Cfgs, {pause_if_all_down, [B], ignore}).
+
+pause_false_promises([CfgA, CfgB | _] = Cfgs, ClusterPartitionHandling) ->
     [A, B] = [pget(node, Cfg) || Cfg <- Cfgs],
-    set_mode([CfgA], pause_minority),
+    set_mode([CfgA], ClusterPartitionHandling),
     ChA = pget(channel, CfgA),
     ChB = pget(channel, CfgB),
     amqp_channel:call(ChB, #'queue.declare'{queue   = <<"test">>,
@@ -164,22 +204,99 @@ prompt_disconnect_detection([CfgA, CfgB]) ->
     [] = rpc(CfgA, rabbit_amqqueue, info_all, [<<"/">>], ?DELAY),
     ok.
 
+ctl_ticktime_sync_with() -> [start_ab, short_ticktime(1)].
+ctl_ticktime_sync([CfgA | _]) ->
+    %% Server has 1s net_ticktime, make sure ctl doesn't get disconnected
+    "ok\n" = rabbit_test_configs:rabbitmqctl(CfgA, "eval 'timer:sleep(5000).'"),
+    ok.
+
+%% NB: we test full and partial partitions here.
 autoheal_with() -> ?CONFIG.
 autoheal(Cfgs) ->
-    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
     set_mode(Cfgs, autoheal),
+    do_autoheal(Cfgs).
+
+autoheal_after_pause_if_all_down_with() -> ?CONFIG.
+autoheal_after_pause_if_all_down([_, CfgB, CfgC | _] = Cfgs) ->
+    B = pget(node, CfgB),
+    C = pget(node, CfgC),
+    set_mode(Cfgs, {pause_if_all_down, [B, C], autoheal}),
+    do_autoheal(Cfgs).
+
+do_autoheal(Cfgs) ->
+    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
     Test = fun (Pairs) ->
                    block_unblock(Pairs),
-                   [await_running(N, true) || N <- [A, B, C]],
-                   [] = partitions(A),
-                   [] = partitions(B),
-                   [] = partitions(C)
+                   %% Sleep to make sure all the partitions are noticed
+                   %% ?DELAY for the net_tick timeout
+                   timer:sleep(?DELAY),
+                   [await_listening(N, true) || N <- [A, B, C]],
+                   [await_partitions(N, []) || N <- [A, B, C]]
            end,
     Test([{B, C}]),
     Test([{A, C}, {B, C}]),
     Test([{A, B}, {A, C}, {B, C}]),
     ok.
 
+partial_false_positive_with() -> ?CONFIG.
+partial_false_positive(Cfgs) ->
+    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+    block([{A, B}]),
+    timer:sleep(1000),
+    block([{A, C}]),
+    timer:sleep(?DELAY),
+    unblock([{A, B}, {A, C}]),
+    timer:sleep(?DELAY),
+    %% When B times out A's connection, it will check with C. C will
+    %% not have timed out A yet, but already it can't talk to it. We
+    %% need to not consider this a partial partition; B and C should
+    %% still talk to each other.
+    [B, C] = partitions(A),
+    [A] = partitions(B),
+    [A] = partitions(C),
+    ok.
+
+partial_to_full_with() -> ?CONFIG.
+partial_to_full(Cfgs) ->
+    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+    block_unblock([{A, B}]),
+    timer:sleep(?DELAY),
+    %% There are several valid ways this could go, depending on how
+    %% the DOWN messages race: either A gets disconnected first and BC
+    %% stay together, or B gets disconnected first and AC stay
+    %% together, or both make it through and all three get
+    %% disconnected.
+    case {partitions(A), partitions(B), partitions(C)} of
+        {[B, C], [A],    [A]}    -> ok;
+        {[B],    [A, C], [B]}    -> ok;
+        {[B, C], [A, C], [A, B]} -> ok;
+        Partitions               -> exit({partitions, Partitions})
+    end.
+
+partial_pause_minority_with() -> ?CONFIG.
+partial_pause_minority(Cfgs) ->
+    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+    set_mode(Cfgs, pause_minority),
+    block([{A, B}]),
+    [await_running(N, false) || N <- [A, B]],
+    await_running(C, true),
+    unblock([{A, B}]),
+    [await_listening(N, true) || N <- [A, B, C]],
+    [await_partitions(N, []) || N <- [A, B, C]],
+    ok.
+
+partial_pause_if_all_down_with() -> ?CONFIG.
+partial_pause_if_all_down(Cfgs) ->
+    [A, B, C] = [pget(node, Cfg) || Cfg <- Cfgs],
+    set_mode(Cfgs, {pause_if_all_down, [B], ignore}),
+    block([{A, B}]),
+    await_running(A, false),
+    [await_running(N, true) || N <- [B, C]],
+    unblock([{A, B}]),
+    [await_listening(N, true) || N <- [A, B, C]],
+    [await_partitions(N, []) || N <- [A, B, C]],
+    ok.
+
 set_mode(Cfgs, Mode) ->
     [set_env(Cfg, rabbit, cluster_partition_handling, Mode) || Cfg <- Cfgs].
 
@@ -195,7 +312,14 @@ block(Pairs)   -> [block(X, Y) || {X, Y} <- Pairs].
 unblock(Pairs) -> [allow(X, Y) || {X, Y} <- Pairs].
 
 partitions(Node) ->
-    rpc:call(Node, rabbit_node_monitor, partitions, []).
+    case rpc:call(Node, rabbit_node_monitor, partitions, []) of
+        {badrpc, {'EXIT', E}} = R -> case rabbit_misc:is_abnormal_exit(E) of
+                                         true  -> R;
+                                         false -> timer:sleep(1000),
+                                                  partitions(Node)
+                                     end;
+        Partitions                -> Partitions
+    end.
 
 block(X, Y) ->
     rpc:call(X, inet_tcp_proxy, block, [Y]),
@@ -205,14 +329,15 @@ allow(X, Y) ->
     rpc:call(X, inet_tcp_proxy, allow, [Y]),
     rpc:call(Y, inet_tcp_proxy, allow, [X]).
 
-await_running  (Node, Bool) -> await(Node, Bool, fun is_running/1).
-await_listening(Node, Bool) -> await(Node, Bool, fun is_listening/1).
+await_running   (Node, Bool)  -> await(Node, Bool,  fun is_running/1).
+await_listening (Node, Bool)  -> await(Node, Bool,  fun is_listening/1).
+await_partitions(Node, Parts) -> await(Node, Parts, fun partitions/1).
 
-await(Node, Bool, Fun) ->
+await(Node, Res, Fun) ->
     case Fun(Node) of
-        Bool -> ok;
-        _    -> timer:sleep(100),
-                await(Node, Bool, Fun)
+        Res -> ok;
+        _   -> timer:sleep(100),
+               await(Node, Res, Fun)
     end.
 
 is_running(Node) -> rpc:call(Node, rabbit, is_running, []).
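As a reference point, the modes installed by set_mode/2 above are values of the rabbit application's cluster_partition_handling setting; the new pause_if_all_down mode, written as a rabbitmq.config entry, would look roughly like this sketch (node name hypothetical):

    [{rabbit, [{cluster_partition_handling,
                {pause_if_all_down, ['rabbit@node1'], autoheal}}]}].
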
diff --git a/rabbitmq-server/plugins-src/rabbitmq-test/test/src/rabbit_priority_queue_test.erl b/rabbitmq-server/plugins-src/rabbitmq-test/test/src/rabbit_priority_queue_test.erl
new file mode 100644 (file)
index 0000000..44228ff
--- /dev/null
@@ -0,0 +1,335 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%
+
+-module(rabbit_priority_queue_test).
+
+-compile(export_all).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+%% The BQ API is used in all sorts of places in all sorts of
+%% ways. Therefore we have to jump through a few different hoops
+%% in order to integration-test it.
+%%
+%% * start/1, stop/0, init/3, terminate/2, delete_and_terminate/2
+%%   - starting and stopping rabbit. durable queues / persistent msgs needed
+%%     to test recovery
+%%
+%% * publish/5, drain_confirmed/1, fetch/2, ack/2, is_duplicate/2, msg_rates/1,
+%%   needs_timeout/1, timeout/1, invoke/3, resume/1 [0]
+%%   - regular publishing and consuming, with confirms and acks and durability
+%%
+%% * publish_delivered/4    - publish with acks straight through
+%% * discard/3              - publish without acks straight through
+%% * dropwhile/2            - expire messages without DLX
+%% * fetchwhile/4           - expire messages with DLX
+%% * ackfold/4              - reject messages with DLX
+%% * requeue/2              - reject messages without DLX
+%% * drop/2                 - maxlen messages without DLX
+%% * purge/1                - issue AMQP queue.purge
+%% * purge_acks/1           - mirror queue explicit sync with unacked msgs
+%% * fold/3                 - mirror queue explicit sync
+%% * depth/1                - mirror queue implicit sync detection
+%% * len/1, is_empty/1      - info items
+%% * handle_pre_hibernate/1 - hibernation
+%%
+%% * set_ram_duration_target/2, ram_duration/1, status/1
+%%   - maybe need unit testing?
+%%
+%% [0] publish enough to get credit flow from msg store
+
+recovery_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    amqp_connection:close(Conn),
+
+    %% TODO these break coverage
+    rabbit:stop(),
+    rabbit:start(),
+
+    {Conn2, Ch2} = open(),
+    get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    delete(Ch2, Q),
+    amqp_connection:close(Conn2),
+    passed.
+
+simple_order_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    publish(Ch, Q, [2, 3, 1, 2, 3, 1, 2, 3, 1]),
+    get_all(Ch, Q, no_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    publish(Ch, Q, [3, 1, 2, 3, 1, 2, 3, 1, 2]),
+    get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    delete(Ch, Q),
+    amqp_connection:close(Conn),
+    passed.
+
+matching_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, 5),
+    %% We round priority down, and 0 is the default
+    publish(Ch, Q, [undefined, 0, 5, 10, undefined]),
+    get_all(Ch, Q, do_ack, [5, 10, undefined, 0, undefined]),
+    delete(Ch, Q),
+    amqp_connection:close(Conn),
+    passed.
+
+resume_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, 5),
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    publish_many(Ch, Q, 10000),
+    amqp_channel:wait_for_confirms(Ch),
+    amqp_channel:call(Ch, #'queue.purge'{queue = Q}), %% Assert it exists
+    delete(Ch, Q),
+    amqp_connection:close(Conn),
+    passed.
+
+straight_through_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, 3),
+    [begin
+         consume(Ch, Q, Ack),
+         [begin
+              publish1(Ch, Q, P),
+              assert_delivered(Ch, Ack, P)
+          end || P <- [1, 2, 3]],
+         cancel(Ch)
+     end || Ack <- [do_ack, no_ack]],
+    get_empty(Ch, Q),
+    delete(Ch, Q),
+    amqp_connection:close(Conn),
+    passed.
+
+dropwhile_fetchwhile_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    [begin
+         declare(Ch, Q, Args ++ arguments(3)),
+         publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+         timer:sleep(10),
+         get_empty(Ch, Q),
+         delete(Ch, Q)
+     end ||
+        Args <- [[{<<"x-message-ttl">>, long, 1}],
+                 [{<<"x-message-ttl">>,          long,    1},
+                  {<<"x-dead-letter-exchange">>, longstr, <<"amq.fanout">>}]
+                ]],
+    amqp_connection:close(Conn),
+    passed.
+
+ackfold_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    Q2 = <<"test2">>,
+    declare(Ch, Q,
+            [{<<"x-dead-letter-exchange">>, longstr, <<>>},
+             {<<"x-dead-letter-routing-key">>, longstr, Q2}
+             | arguments(3)]),
+    declare(Ch, Q2, none),
+    publish(Ch, Q, [1, 2, 3]),
+    [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+    amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+                                        multiple     = true,
+                                        requeue      = false}),
+    timer:sleep(100),
+    get_all(Ch, Q2, do_ack, [3, 2, 1]),
+    delete(Ch, Q),
+    delete(Ch, Q2),
+    amqp_connection:close(Conn),
+    passed.
+
+requeue_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+    amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+                                        multiple     = true,
+                                        requeue      = true}),
+    get_all(Ch, Q, do_ack, [3, 2, 1]),
+    delete(Ch, Q),
+    amqp_connection:close(Conn),
+    passed.
+
+drop_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, [{<<"x-max-length">>, long, 4} | arguments(3)]),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    %% We drop from the head, so this is according to the "spec" even
+    %% if not likely to be what the user wants.
+    get_all(Ch, Q, do_ack, [2, 1, 1, 1]),
+    delete(Ch, Q),
+    amqp_connection:close(Conn),
+    passed.
+
+purge_test() ->
+    {Conn, Ch} = open(),
+    Q = <<"test">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    amqp_channel:call(Ch, #'queue.purge'{queue = Q}),
+    get_empty(Ch, Q),
+    delete(Ch, Q),
+    amqp_connection:close(Conn),
+    passed.
+
+ram_duration_test() ->
+    QName = rabbit_misc:r(<<"/">>, queue, <<"pseudo">>),
+    Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+    Q = Q0#amqqueue{arguments = [{<<"x-max-priority">>, long, 5}]},
+    PQ = rabbit_priority_queue,
+    BQS1 = PQ:init(Q, new, fun(_, _) -> ok end),
+    {_Duration1, BQS2} = PQ:ram_duration(BQS1),
+    BQS3 = PQ:set_ram_duration_target(infinity, BQS2),
+    BQS4 = PQ:set_ram_duration_target(1, BQS3),
+    {_Duration2, BQS5} = PQ:ram_duration(BQS4),
+    PQ:delete_and_terminate(a_whim, BQS5),
+    passed.
+
+mirror_queue_sync_with() -> cluster_ab.
+mirror_queue_sync([CfgA, _CfgB]) ->
+    Ch = pget(channel, CfgA),
+    Q = <<"test">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    ok = rabbit_test_util:set_ha_policy(CfgA, <<".*">>, <<"all">>),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3]),
+    %% master now has 9, slave 6.
+    get_partial(Ch, Q, manual_ack, [3, 3, 3, 2, 2, 2]),
+    %% So some but not all are unacked at the slave
+    rabbit_test_util:control_action(sync_queue, CfgA, [binary_to_list(Q)],
+                                    [{"-p", "/"}]),
+    wait_for_sync(CfgA, rabbit_misc:r(<<"/">>, queue, Q)),
+    passed.
+
+%%----------------------------------------------------------------------------
+
+open() ->
+    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    {Conn, Ch}.
+
+declare(Ch, Q, Args) when is_list(Args) ->
+    amqp_channel:call(Ch, #'queue.declare'{queue     = Q,
+                                           durable   = true,
+                                           arguments = Args});
+declare(Ch, Q, Max) ->
+    declare(Ch, Q, arguments(Max)).
+
+delete(Ch, Q) ->
+    amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Q, Ps) ->
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    [publish1(Ch, Q, P) || P <- Ps],
+    amqp_channel:wait_for_confirms(Ch).
+
+publish_many(_Ch, _Q, 0) -> ok;
+publish_many( Ch,  Q, N) -> publish1(Ch, Q, random:uniform(5)),
+                            publish_many(Ch, Q, N - 1).
+
+publish1(Ch, Q, P) ->
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props   = props(P),
+                                payload = priority2bin(P)}).
+
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P)         -> #'P_basic'{priority      = P,
+                               delivery_mode = 2}.
+
+consume(Ch, Q, Ack) ->
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue        = Q,
+                                                no_ack       = Ack =:= no_ack,
+                                                consumer_tag = <<"ctag">>},
+                           self()),
+    receive
+        #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+             ok
+    end.
+
+cancel(Ch) ->
+    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+assert_delivered(Ch, Ack, P) ->
+    PBin = priority2bin(P),
+    receive
+        {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+            ?assertEqual(PBin, PBin2),
+            maybe_ack(Ch, Ack, DTag)
+    end.
+
+get_all(Ch, Q, Ack, Ps) ->
+    DTags = get_partial(Ch, Q, Ack, Ps),
+    get_empty(Ch, Q),
+    DTags.
+
+get_partial(Ch, Q, Ack, Ps) ->
+    [get_ok(Ch, Q, Ack, P) || P <- Ps].
+
+get_empty(Ch, Q) ->
+    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+get_ok(Ch, Q, Ack, P) ->
+    PBin = priority2bin(P),
+    {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+        amqp_channel:call(Ch, #'basic.get'{queue  = Q,
+                                           no_ack = Ack =:= no_ack}),
+    ?assertEqual(PBin, PBin2),
+    maybe_ack(Ch, Ack, DTag).
+
+maybe_ack(Ch, do_ack, DTag) ->
+    amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+    DTag;
+maybe_ack(_Ch, _, DTag) ->
+    DTag.
+
+arguments(none) -> [];
+arguments(Max)  -> [{<<"x-max-priority">>, byte, Max}].
+
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Int)       -> list_to_binary(integer_to_list(Int)).
+
+%%----------------------------------------------------------------------------
+
+wait_for_sync(Cfg, Q) ->
+    case synced(Cfg, Q) of
+        true  -> ok;
+        false -> timer:sleep(100),
+                 wait_for_sync(Cfg, Q)
+    end.
+
+synced(Cfg, Q) ->
+    Info = rpc:call(pget(node, Cfg),
+                    rabbit_amqqueue, info_all,
+                    [<<"/">>, [name, synchronised_slave_pids]]),
+    [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info,
+                        Q =:= Q1],
+    length(SSPids) =:= 1.
+
+%%----------------------------------------------------------------------------
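For reference, given the declare/3 and arguments/1 helpers above, declare(Ch, <<"test">>, 3) boils down to an AMQP declare along these lines:

    amqp_channel:call(Ch, #'queue.declare'{
                             queue     = <<"test">>,
                             durable   = true,
                             arguments = [{<<"x-max-priority">>, byte, 3}]}).
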
index 7b13f0fdae940fd6075122258d82508879944e2b..389ff233aa55c7a03959e7441f7edf9c4a6e87a1 100644 (file)
@@ -35,6 +35,26 @@ rapid_redeclare([CfgA | _]) ->
      end || _I <- lists:seq(1, 20)],
     ok.
 
+%% Check that by the time we get a declare-ok back, the slaves are up
+%% and in Mnesia.
+declare_synchrony_with() -> [cluster_ab, ha_policy_all].
+declare_synchrony([Rabbit, Hare]) ->
+    RabbitCh = pget(channel, Rabbit),
+    HareCh = pget(channel, Hare),
+    Q = <<"mirrored-queue">>,
+    declare(RabbitCh, Q),
+    amqp_channel:call(RabbitCh, #'confirm.select'{}),
+    amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+    amqp_channel:wait_for_confirms(RabbitCh),
+    _Rabbit2 = rabbit_test_configs:kill_node(Rabbit),
+
+    #'queue.declare_ok'{message_count = 1} = declare(HareCh, Q),
+    ok.
+
+declare(Ch, Name) ->
+    amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}).
+
 consume_survives_stop_with()     -> ?CONFIG.
 consume_survives_sigkill_with()  -> ?CONFIG.
 consume_survives_policy_with()   -> ?CONFIG.
diff --git a/rabbitmq-server/plugins-src/rabbitmq-tracing/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-tracing/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place to discuss code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index c91d4149144fc1f6b9934ed9d528061b64008a4c..61b5ae4c1e8d74c20073d781482123aadc734d9e 100644 (file)
@@ -36,5 +36,7 @@ Example for how to create a trace:
 
 $ curl -i -u guest:guest -H "content-type:application/json" -XPUT \
   http://localhost:55672/api/traces/%2f/my-trace \
-  -d'{"format":"text","pattern":"#"}'
+  -d'{"format":"text","pattern":"#", "max_payload_bytes":1000}'
 
+max_payload_bytes is optional (omit it to prevent payload truncation);
+format and pattern are mandatory.
\ No newline at end of file
index b8c63544065b25b0866643988f1dfbf3b14f278f..470527d569553d954b9c35260b46ce2c35fbca55 100644 (file)
@@ -16,6 +16,7 @@
                 <th>Name</th>
                 <th>Pattern</th>
                 <th>Format</th>
+                <th>Payload limit</th>
                 <th>Rate</th>
                 <th>Queued</th>
                 <th></th>
                 <td><%= fmt_string(trace.name) %></td>
                 <td><%= fmt_string(trace.pattern) %></td>
                 <td><%= fmt_string(trace.format) %></td>
+                <td class="c"><%= fmt_string(trace.max_payload_bytes, 'Unlimited') %></td>
                 <% if (trace.queue) { %>
                 <td class="r">
-                  <%= fmt_rate(trace.queue.message_stats, 'ack', false) %>
+                  <%= fmt_detail_rate(trace.queue.message_stats, 'deliver_no_ack') %>
                 </td>
                 <td class="r">
                   <%= trace.queue.messages %>
             </select>
           </td>
         </tr>
+        <tr>
+          <th><label>Max payload bytes: <span class="help" id="tracing-max-payload"></span></label></th>
+          <td>
+            <input type="text" name="max_payload_bytes" value=""/>
+          </td>
+        </tr>
         <tr>
           <th><label>Pattern:</label></th>
           <td>
index 89852ba5b38bf3996f43ee671126df3ee9316be9..73c8b09505c1da0ce3209c3bc9cf93a56f4abc0e 100644 (file)
@@ -11,6 +11,13 @@ dispatcher_add(function(sammy) {
                 'trace', '#/traces');
         });
     sammy.put('#/traces', function() {
+            if (this.params['max_payload_bytes'] === '') {
+                delete this.params['max_payload_bytes'];
+            }
+            else {
+                this.params['max_payload_bytes'] =
+                    parseInt(this.params['max_payload_bytes']);
+            }
             if (sync_put(this, '/traces/:vhost/:name'))
                 update();
             return false;
@@ -29,8 +36,11 @@ dispatcher_add(function(sammy) {
 
 NAVIGATION['Admin'][0]['Tracing'] = ['#/traces', 'administrator'];
 
+HELP['tracing-max-payload'] =
+    'Maximum size of payload to log, in bytes. Payloads larger than this limit will be truncated. Leave blank to prevent truncation. Set to 0 to prevent logging of payload altogether.';
+
 function link_trace(name) {
-    return _link_to(fmt_escape_html(name), 'api/trace-files/' + esc(name));
+    return _link_to(name, 'api/trace-files/' + esc(name));
 }
 
 function link_trace_queue(trace) {
index bfa249f6f7911d5b8dc1070cdb2a5370995a154e..815855bb1e28d83c29edcfd0f1174662eca9e92f 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_app).
index 219422661debb840f1fd139a60b8200cc4157436..ca2273bc852cd0d8f384d35d76fdb429ffdba488 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_consumer).
 
 -import(rabbit_misc, [pget/2, pget/3, table_lookup/2]).
 
--record(state, {conn, ch, vhost, queue, file, filename, format}).
--record(log_record, {timestamp, type, exchange, queue, node, routing_keys,
+-record(state, {conn, ch, vhost, queue, file, filename, format, buf, buf_cnt,
+                max_payload}).
+-record(log_record, {timestamp, type, exchange, queue, node, connection,
+                     vhost, username, channel, routing_keys, routed_queues,
                      properties, payload}).
 
 -define(X, <<"amq.rabbitmq.trace">>).
+-define(MAX_BUF, 100).
 
 -export([start_link/1, info_all/1]).
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
@@ -44,6 +47,7 @@ init(Args) ->
     process_flag(trap_exit, true),
     Name = pget(name, Args),
     VHost = pget(vhost, Args),
+    MaxPayload = pget(max_payload_bytes, Args, unlimited),
     {ok, Conn} = amqp_connection:start(
                    #amqp_params_direct{virtual_host = VHost}),
     link(Conn),
@@ -56,16 +60,15 @@ init(Args) ->
     amqp_channel:call(
       Ch, #'queue.bind'{exchange = ?X, queue = Q,
                         routing_key = pget(pattern, Args)}),
-    #'basic.qos_ok'{} =
-        amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 10}),
+    amqp_channel:enable_delivery_flow_control(Ch),
     #'basic.consume_ok'{} =
         amqp_channel:subscribe(Ch, #'basic.consume'{queue  = Q,
-                                                    no_ack = false}, self()),
+                                                    no_ack = true}, self()),
     {ok, Dir} = application:get_env(directory),
     Filename = Dir ++ "/" ++ binary_to_list(Name) ++ ".log",
     case filelib:ensure_dir(Filename) of
         ok ->
-            case file:open(Filename, [append]) of
+            case prim_file:open(Filename, [append]) of
                 {ok, F} ->
                     rabbit_tracing_traces:announce(VHost, Name, self()),
                     Format = list_to_atom(binary_to_list(pget(format, Args))),
@@ -73,7 +76,8 @@ init(Args) ->
                                     "format ~p~n", [Filename, Format]),
                     {ok, #state{conn = Conn, ch = Ch, vhost = VHost, queue = Q,
                                 file = F, filename = Filename,
-                                format = Format}};
+                                format = Format, buf = [], buf_cnt = 0,
+                                max_payload = MaxPayload}};
                 {error, E} ->
                     {stop, {could_not_open, Filename, E}}
             end;
@@ -93,21 +97,25 @@ handle_call(_Req, _From, State) ->
 handle_cast(_C, State) ->
     {noreply, State}.
 
-handle_info(Delivery = {#'basic.deliver'{delivery_tag = Seq}, #amqp_msg{}},
-            State    = #state{ch = Ch, file = F, format = Format}) ->
-    Print = fun(Fmt, Args) -> io:format(F, Fmt, Args) end,
-    log(Format, Print, delivery_to_log_record(Delivery)),
-    amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = Seq}),
-    {noreply, State};
+handle_info({BasicDeliver, Msg, DeliveryCtx},
+            State = #state{format = Format}) ->
+    amqp_channel:notify_received(DeliveryCtx),
+    {noreply, log(Format, delivery_to_log_record({BasicDeliver, Msg}, State),
+                  State),
+     0};
+
+handle_info(timeout, State) ->
+    {noreply, flush(State)};
 
 handle_info(_I, State) ->
     {noreply, State}.
 
-terminate(shutdown, #state{conn = Conn, ch = Ch,
-                           file = F, filename = Filename}) ->
+terminate(shutdown, State = #state{conn = Conn, ch = Ch,
+                                   file = F, filename = Filename}) ->
+    flush(State),
     catch amqp_channel:close(Ch),
     catch amqp_connection:close(Conn),
-    catch file:close(F),
+    catch prim_file:close(F),
     rabbit_log:info("Tracer closed log file ~p~n", [Filename]),
     ok;
 
@@ -120,49 +128,104 @@ code_change(_, State, _) -> {ok, State}.
 
 delivery_to_log_record({#'basic.deliver'{routing_key = Key},
                         #amqp_msg{props   = #'P_basic'{headers = H},
-                                  payload = Payload}}) ->
-    {Type, Q} = case Key of
-                    <<"publish.", _Rest/binary>> -> {published, none};
-                    <<"deliver.", Rest/binary>>  -> {received,  Rest}
-                end,
-    {longstr, Node} = table_lookup(H, <<"node">>),
-    {longstr, X}    = table_lookup(H, <<"exchange_name">>),
-    {array, Keys}   = table_lookup(H, <<"routing_keys">>),
-    {table, Props}  = table_lookup(H, <<"properties">>),
-    #log_record{timestamp    = rabbit_mgmt_format:timestamp(os:timestamp()),
+                                  payload = Payload}}, State) ->
+    {Type, Q, RQs} = case Key of
+                         <<"publish.", _Rest/binary>> ->
+                             {array, Qs} = table_lookup(H, <<"routed_queues">>),
+                             {published, none, [Q || {_, Q} <- Qs]};
+                         <<"deliver.", Rest/binary>> ->
+                             {received,  Rest, none}
+                     end,
+    {longstr, Node}   = table_lookup(H, <<"node">>),
+    {longstr, X}      = table_lookup(H, <<"exchange_name">>),
+    {array, Keys}     = table_lookup(H, <<"routing_keys">>),
+    {table, Props}    = table_lookup(H, <<"properties">>),
+    {longstr, Conn}   = table_lookup(H, <<"connection">>),
+    {longstr, VHost}  = table_lookup(H, <<"vhost">>),
+    {longstr, User}   = table_lookup(H, <<"user">>),
+    {signedint, Chan} = table_lookup(H, <<"channel">>),
+    #log_record{timestamp    = rabbit_mgmt_format:now_to_str_ms(os:timestamp()),
                 type         = Type,
                 exchange     = X,
                 queue        = Q,
                 node         = Node,
+                connection   = Conn,
+                vhost        = VHost,
+                username     = User,
+                channel      = Chan,
                 routing_keys = [K || {_, K} <- Keys],
+                routed_queues= RQs,
                 properties   = Props,
-                payload      = Payload}.
-
-log(text, P, Record) ->
-    P("~n~s~n", [string:copies("=", 80)]),
-    P("~s: ", [Record#log_record.timestamp]),
-    case Record#log_record.type of
-        published -> P("Message published~n~n", []);
-        received  -> P("Message received~n~n", [])
-    end,
-    P("Node:         ~s~n", [Record#log_record.node]),
-    P("Exchange:     ~s~n", [Record#log_record.exchange]),
-    case Record#log_record.queue of
-        none -> ok;
-        Q    -> P("Queue:        ~s~n", [Q])
-    end,
-    P("Routing keys: ~p~n", [Record#log_record.routing_keys]),
-    P("Properties:   ~p~n", [Record#log_record.properties]),
-    P("Payload: ~n~s~n",    [Record#log_record.payload]);
-
-log(json, P, Record) ->
-    P("~s~n", [mochijson2:encode(
-                 [{timestamp,    Record#log_record.timestamp},
-                  {type,         Record#log_record.type},
-                  {node,         Record#log_record.node},
-                  {exchange,     Record#log_record.exchange},
-                  {queue,        Record#log_record.queue},
-                  {routing_keys, Record#log_record.routing_keys},
-                  {properties,   rabbit_mgmt_format:amqp_table(
+                payload      = truncate(Payload, State)}.
+
+log(text, Record, State) ->
+    Fmt = "~n========================================"
+        "========================================~n~s: Message ~s~n~n"
+        "Node:         ~s~nConnection:   ~s~n"
+        "Virtual host: ~s~nUser:         ~s~n"
+        "Channel:      ~p~nExchange:     ~s~n"
+        "Routing keys: ~p~n" ++
+        case Record#log_record.queue of
+            none -> "";
+            _    -> "Queue:        ~s~n"
+        end ++
+        case Record#log_record.routed_queues of
+            none -> "";
+            _    -> "Routed queues: ~p~n"
+        end ++
+        "Properties:   ~p~nPayload: ~n~s~n",
+    Args =
+        [Record#log_record.timestamp,
+         Record#log_record.type,
+         Record#log_record.node,    Record#log_record.connection,
+         Record#log_record.vhost,   Record#log_record.username,
+         Record#log_record.channel, Record#log_record.exchange,
+         Record#log_record.routing_keys] ++
+        case Record#log_record.queue of
+            none -> [];
+            Q    -> [Q]
+        end ++
+        case Record#log_record.routed_queues of
+            none -> [];
+            RQs  -> [RQs]
+        end ++
+        [Record#log_record.properties, Record#log_record.payload],
+    print_log(io_lib:format(Fmt, Args), State);
+
+log(json, Record, State) ->
+    print_log(mochijson2:encode(
+                [{timestamp,    Record#log_record.timestamp},
+                 {type,         Record#log_record.type},
+                 {node,         Record#log_record.node},
+                 {connection,   Record#log_record.connection},
+                 {vhost,        Record#log_record.vhost},
+                 {user,         Record#log_record.username},
+                 {channel,      Record#log_record.channel},
+                 {exchange,     Record#log_record.exchange},
+                 {queue,        Record#log_record.queue},
+                 {routed_queues, Record#log_record.routed_queues},
+                 {routing_keys, Record#log_record.routing_keys},
+                 {properties,   rabbit_mgmt_format:amqp_table(
                                    Record#log_record.properties)},
-                  {payload,      base64:encode(Record#log_record.payload)}])]).
+                 {payload,      base64:encode(Record#log_record.payload)}])
+              ++ "\n",
+              State).
+
+print_log(LogMsg, State = #state{buf = Buf, buf_cnt = BufCnt}) ->
+    maybe_flush(State#state{buf = [LogMsg | Buf], buf_cnt = BufCnt + 1}).
+
+maybe_flush(State = #state{buf_cnt = ?MAX_BUF}) ->
+    flush(State);
+maybe_flush(State) ->
+    State.
+
+flush(State = #state{file = F, buf = Buf}) ->
+    prim_file:write(F, lists:reverse(Buf)),
+    State#state{buf = [], buf_cnt = 0}.
+
+truncate(Payload, #state{max_payload = Max}) ->
+    case Max =:= unlimited orelse size(Payload) =< Max of
+        true  -> Payload;
+        false -> <<Trunc:Max/binary, _/binary>> = Payload,
+                 Trunc
+    end.
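Two behavioural changes in the consumer are worth spelling out: deliveries are now consumed with no_ack plus channel delivery flow control (notify_received/1) instead of per-message basic.ack, and log records are buffered in the process state and written with prim_file only when ?MAX_BUF entries accumulate or the gen_server's zero timeout fires after the last delivery. The payload clipping itself is a plain bounded binary match; a self-contained sketch of the same rule, assuming a 10-byte limit:

    %% 'unlimited' disables clipping, otherwise the payload is cut to Max bytes.
    Truncate = fun(Payload, Max) ->
                   case Max =:= unlimited orelse size(Payload) =< Max of
                       true  -> Payload;
                       false -> <<Trunc:Max/binary, _/binary>> = Payload,
                                Trunc
                   end
               end,
    <<"0123456789">> = Truncate(<<"0123456789abcdef">>, 10),
    <<"short">>      = Truncate(<<"short">>, unlimited).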
index 10289a609c8796370b57c0a12bcd54e86fe6195b..8dbc22f9da1122d50a5f71948eb521ce26f1bf66 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ Federation.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_consumer_sup).
index 2005fdf2dd72054027d9bc015fdc218c3c9b5a7b..c5520e7a3e3487a5c17fde1ae0d6ae27d4ff7cde 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_files).
index 25a4bd8772409baed360593ef37c4fe37bc1af9a..3d2d44dfd9a78197c704a06257404f5be6613f82 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_mgmt).
index e0f352efea3ee5abe322ee8a710cee9742012199..502ef7e52342dcb0e2e163a582c6e3e668536164 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_sup).
index 7fc7c147f55d310ee031e6822bef1806aa2e85b1..53336d70fd95392dd77de3658c51f81b64afc23a 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_traces).
index 4d67f73445001868b68cb6c7e979697dcd96dafb..30a134b910e7aec9c81c74a36fe84b624c99d9b3 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 
 -module(rabbit_tracing_wm_file).
 
index b19cfba422bba7cac060d8ed3246aa1f2ebdcd19..d3a8004f256af57df2d62f6b33419f3fd87e6347 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_wm_files).
index d7f8acbf12aa8cc342ffb2f605de4b2a1616c029..a9cdbacc4cafa46ab52fdca658f7fa1a10ca8fe9 100644 (file)
@@ -11,7 +11,7 @@
 %%   The Original Code is RabbitMQ.
 %%
 %%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%   Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 
 -module(rabbit_tracing_wm_trace).
 
@@ -23,6 +23,8 @@
 -define(ERR, <<"Something went wrong trying to start the trace - check the "
                "logs.">>).
 
+-import(rabbit_misc, [pget/2, pget/3]).
+
 -include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
@@ -47,20 +49,24 @@ resource_exists(ReqData, Context) ->
 to_json(ReqData, Context) ->
     rabbit_mgmt_util:reply(trace(ReqData), ReqData, Context).
 
-accept_content(ReqData, Context) ->
-    case rabbit_mgmt_util:vhost(ReqData) of
-        not_found -> not_found;
-        VHost     -> Name = rabbit_mgmt_util:id(name, ReqData),
-                     rabbit_mgmt_util:with_decode(
-                       [format], ReqData, Context,
-                       fun([_], Trace) ->
-                               case rabbit_tracing_traces:create(
-                                      VHost, Name, Trace) of
-                                   {ok, _} -> {true, ReqData, Context};
-                                   _       -> rabbit_mgmt_util:bad_request(
-                                                ?ERR, ReqData, Context)
-                               end
-                       end)
+accept_content(RD, Ctx) ->
+    case rabbit_mgmt_util:vhost(RD) of
+        not_found ->
+            not_found;
+        VHost ->
+            Name = rabbit_mgmt_util:id(name, RD),
+            rabbit_mgmt_util:with_decode(
+              [format, pattern], RD, Ctx,
+              fun([_, _], Trace) ->
+                      Fs = [fun val_payload_bytes/3, fun val_format/3,
+                            fun val_create/3],
+                      case lists:foldl(fun (F,  ok)  -> F(VHost, Name, Trace);
+                                           (_F, Err) -> Err
+                                       end, ok, Fs) of
+                          ok  -> {true, RD, Ctx};
+                          Err -> rabbit_mgmt_util:bad_request(Err, RD, Ctx)
+                      end
+              end)
     end.
 
 delete_resource(ReqData, Context) ->
@@ -80,3 +86,21 @@ trace(ReqData) ->
         VHost     -> rabbit_tracing_traces:lookup(
                        VHost, rabbit_mgmt_util:id(name, ReqData))
     end.
+
+val_payload_bytes(_VHost, _Name, Trace) ->
+    case is_integer(pget(max_payload_bytes, Trace, 0)) of
+        false -> <<"max_payload_bytes not integer">>;
+        true  -> ok
+    end.
+
+val_format(_VHost, _Name, Trace) ->
+    case lists:member(pget(format, Trace), [<<"json">>, <<"text">>]) of
+        false -> <<"format not json or text">>;
+        true  -> ok
+    end.
+
+val_create(VHost, Name, Trace) ->
+    case rabbit_tracing_traces:create(VHost, Name, Trace) of
+        {ok, _} -> ok;
+        _       -> ?ERR
+    end.
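accept_content/2 now runs the request through a list of validators and keeps only the first error; each validator returns ok or a binary that becomes the 400 response body. The same short-circuiting fold in isolation, with illustrative stand-in checks rather than the module's own val_* functions:

    RunChecks = fun(Checks) ->
                    lists:foldl(fun(F,  ok)  -> F();
                                   (_F, Err) -> Err
                                end, ok, Checks)
                end,
    ok         = RunChecks([fun() -> ok end, fun() -> ok end]),
    <<"boom">> = RunChecks([fun() -> <<"boom">> end,
                            fun() -> ok end]).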
index 9f423557eee13dc9e553a5f5bf9cd7b3e35817a1..ef0fe50a7123006698c516bdb5cdddd81ed4df67 100644 (file)
@@ -11,7 +11,7 @@
 %%  The Original Code is RabbitMQ.
 %%
 %%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%%  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_wm_traces).
index 1c0ed0afbc18eee6f3799342327de1296332eace..df184ebe4252c86fce2a7b8096690ac2694dde96 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_tracing_test).
@@ -69,6 +69,21 @@ tracing_test() ->
     http_delete("/trace-files/test.log", ?NO_CONTENT),
     ok.
 
+tracing_validation_test() ->
+    Path = "/traces/%2f/test",
+    http_put(Path, [{pattern,           <<"#">>}],    ?BAD_REQUEST),
+    http_put(Path, [{format,            <<"json">>}], ?BAD_REQUEST),
+    http_put(Path, [{format,            <<"ebcdic">>},
+                    {pattern,           <<"#">>}],    ?BAD_REQUEST),
+    http_put(Path, [{format,            <<"text">>},
+                    {pattern,           <<"#">>},
+                    {max_payload_bytes, <<"abc">>}],  ?BAD_REQUEST),
+    http_put(Path, [{format,            <<"json">>},
+                    {pattern,           <<"#">>},
+                    {max_payload_bytes, 1000}],       ?NO_CONTENT),
+    http_delete(Path, ?NO_CONTENT),
+    ok.
+
 %%---------------------------------------------------------------------------
 %% Below is copypasta from rabbit_mgmt_test_http, it's not obvious how
 %% to share that given the build system.
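These cases exercise the val_* helpers added to rabbit_tracing_wm_trace above: a missing pattern or format, an unknown format, or a non-integer max_payload_bytes all come back as 400 with the validator's message as the body. A sketch of one failing call over httpc, again assuming default guest/guest credentials and the 55672 listener used elsewhere in these docs:

    inets:start(),
    Auth = "Basic " ++ base64:encode_to_string("guest:guest"),
    Bad  = "{\"format\":\"text\",\"pattern\":\"#\",\"max_payload_bytes\":\"abc\"}",
    {ok, {{_, 400, _}, _, _}} =
        httpc:request(put,
                      {"http://localhost:55672/api/traces/%2f/test",
                       [{"authorization", Auth}],
                       "application/json", Bad},
                      [], []).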
diff --git a/rabbitmq-server/plugins-src/rabbitmq-web-dispatch/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-web-dispatch/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place to discuss code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 93eb4d4a6b039b833d6b0eea7c817e61269a0ea2..e15530adaf21be898815c51849bca9cab00e8b77 100644 (file)
@@ -112,7 +112,7 @@ handle_call(list_all, _From, undefined) ->
     {reply, list(), undefined};
 
 handle_call(Req, _From, State) ->
-    error_logger:format("Unexpected call to ~p: ~p~n", [?MODULE, Req]),
+    rabbit_log:error("Unexpected call to ~p: ~p~n", [?MODULE, Req]),
     {stop, unknown_request, State}.
 
 handle_cast(_, State) ->
index 83be7b19a9e607a0c451ef8210252da5abfbdb3b..5582d47a36670b13699137ee7a70dccbde05a65a 100644 (file)
@@ -64,8 +64,8 @@ init([]) ->
 mochi_options(Listener) ->
     [{name, name(Listener)},
      {loop, loopfun(Listener)} |
-     easy_ssl(proplists:delete(
-                name, proplists:delete(ignore_in_use, Listener)))].
+     ssl_config(proplists:delete(
+                  name, proplists:delete(ignore_in_use, Listener)))].
 
 loopfun(Listener) ->
     fun (Req) ->
@@ -83,19 +83,31 @@ name(Listener) ->
     Port = proplists:get_value(port, Listener),
     list_to_atom(atom_to_list(?MODULE) ++ "_" ++ integer_to_list(Port)).
 
-easy_ssl(Options) ->
-    case {proplists:get_value(ssl, Options),
-          proplists:get_value(ssl_opts, Options)} of
-        {true, undefined} ->
-            {ok, ServerOpts} = application:get_env(rabbit, ssl_options),
-            SSLOpts = [{K, V} ||
-                          {K, V} <- ServerOpts,
-                          not lists:member(K, [verify, fail_if_no_peer_cert])],
-            [{ssl_opts, SSLOpts}|Options];
-        _ ->
-            Options
+ssl_config(Options) ->
+    case proplists:get_value(ssl, Options) of
+        true -> rabbit_networking:ensure_ssl(),
+                case rabbit_networking:poodle_check('HTTP') of
+                    ok     -> case proplists:get_value(ssl_opts, Options) of
+                                  undefined -> auto_ssl(Options);
+                                  _         -> fix_ssl(Options)
+                              end;
+                    danger -> proplists:delete(ssl, Options)
+                end;
+        _    -> Options
     end.
 
+auto_ssl(Options) ->
+    {ok, ServerOpts} = application:get_env(rabbit, ssl_options),
+    Remove = [verify, fail_if_no_peer_cert],
+    SSLOpts = [{K, V} || {K, V} <- ServerOpts,
+                         not lists:member(K, Remove)],
+    fix_ssl([{ssl_opts, SSLOpts} | Options]).
+
+fix_ssl(Options) ->
+    SSLOpts = proplists:get_value(ssl_opts, Options),
+    rabbit_misc:pset(ssl_opts,
+                     rabbit_networking:fix_ssl_options(SSLOpts), Options).
+
 check_error(Listener, Error) ->
     Ignore = proplists:get_value(ignore_in_use, Listener, false),
     case {Error, Ignore} of
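ssl_config/1 now makes sure TLS support is actually available (rabbit_networking:ensure_ssl/0), drops the listener back to plain HTTP when the POODLE check flags a dangerous SSL configuration, and otherwise either borrows the broker's own ssl_options (minus peer verification) or fixes up an explicitly supplied ssl_opts list. For orientation, a listener with explicit ssl_opts could be configured roughly like this in rabbitmq.config; the port and certificate paths are placeholders, not values taken from this change:

    %% rabbitmq.config fragment (illustrative values only)
    [{rabbitmq_management,
      [{listener, [{port,     15443},
                   {ssl,      true},
                   {ssl_opts, [{cacertfile, "/path/to/cacert.pem"},
                               {certfile,   "/path/to/cert.pem"},
                               {keyfile,    "/path/to/key.pem"}]}]}]}].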
index b62e7adbf95c25e0ae7b811ac04066bafe51707f..a199a31b9413d13bedbe597e849cf5641b542135 100644 (file)
@@ -22,7 +22,8 @@
 -export([makeloop/1, setup/0]).
 
 setup() ->
-    application:set_env(webmachine, error_handler, webmachine_error_handler).
+    application:set_env(
+      webmachine, error_handler, rabbit_webmachine_error_handler).
 
 makeloop(Dispatch) ->
     fun (MochiReq) ->
@@ -33,11 +34,11 @@ makeloop(Dispatch) ->
             %% however, we don't need to dispatch by the host name.
             case webmachine_dispatcher:dispatch(Path, Dispatch, ReqData) of
                 {no_dispatch_match, _Host, _PathElements} ->
-                    {ErrorHTML, ReqState1} =
-                        webmachine_error_handler:render_error(
+                    {ErrorBody, ReqState1} =
+                        rabbit_webmachine_error_handler:render_error(
                           404, Req, {none, none, []}),
                     Req1 = {webmachine_request, ReqState1},
-                    {ok, ReqState2} = Req1:append_to_response_body(ErrorHTML),
+                    {ok, ReqState2} = Req1:append_to_response_body(ErrorBody),
                     Req2 = {webmachine_request, ReqState2},
                     {ok, ReqState3} = Req2:send_response(404),
                     maybe_log_access(ReqState3);
diff --git a/rabbitmq-server/plugins-src/rabbitmq-web-dispatch/src/rabbit_webmachine_error_handler.erl b/rabbitmq-server/plugins-src/rabbitmq-web-dispatch/src/rabbit_webmachine_error_handler.erl
new file mode 100644 (file)
index 0000000..849e5b9
--- /dev/null
@@ -0,0 +1,57 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%
+
+%% We need to ensure all responses are application/json; anything
+%% coming back as text/html could constitute an XSS vector. Also I'm
+%% sure it's easier on our clients if they can always expect JSON
+%% responses.
+%%
+%% Based on webmachine_error_handler, but I'm not sure enough remains
+%% to be copyrightable.
+
+-module(rabbit_webmachine_error_handler).
+
+-export([render_error/3]).
+
+render_error(Code, Req, Reason) ->
+    case Req:has_response_body() of
+        {true, _}  -> maybe_log(Req, Reason),
+                      Req:response_body();
+        {false, _} -> render_error_body(Code, Req:trim_state(), Reason)
+    end.
+
+render_error_body(404,  Req, Reason) -> error_body(404,  Req, "Not Found");
+render_error_body(Code, Req, Reason) -> error_body(Code, Req, Reason).
+
+error_body(Code, Req, Reason) ->
+    {ok, ReqState} = Req:add_response_header("Content-Type","application/json"),
+    case Code of
+        500 -> maybe_log(Req, Reason);
+        _   -> ok
+    end,
+    Json = {struct,
+            [{error,  list_to_binary(httpd_util:reason_phrase(Code))},
+             {reason, list_to_binary(rabbit_misc:format("~p~n", [Reason]))}]},
+    {mochijson2:encode(Json), ReqState}.
+
+maybe_log(_Req, {error, {exit, normal, _Stack}}) ->
+    %% webmachine_request did an exit(normal), so suppress this
+    %% message. This usually happens when a chunked upload is
+    %% interrupted by network failure.
+    ok;
+maybe_log(Req, Reason) ->
+    {Path, _} = Req:path(),
+    error_logger:error_msg("webmachine error: path=~p~n~p~n", [Path, Reason]).
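Because the handler always answers with application/json, a 404 that reaches it produces a small JSON document rather than webmachine's HTML page. The body shape can be reproduced with mochijson2 directly; this sketch assumes the bundled mochijson2 is on the code path and uses an illustrative reason value:

    Json = {struct, [{error,  list_to_binary(httpd_util:reason_phrase(404))},
                     {reason, <<"none">>}]},
    iolist_to_binary(mochijson2:encode(Json)).
    %% => <<"{\"error\":\"Not Found\",\"reason\":\"none\"}">>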
diff --git a/rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place to discuss code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index d9a4b6e80975389f10519b32fe4591715ba3348c..0008d57f07e3fd2f2d2e7dd9fb5fb28d88387489 100644 (file)
@@ -1,4 +1,3 @@
-
 RabbitMQ-Web-Stomp-Examples plugin
 ==================================
 
index bb0d34cc2711c5be7dae413d7d92537cdf5d2b50..15f686df8c8262aa066cec1246126d8e8175dc16 100644 (file)
@@ -1,7 +1,7 @@
 <!doctype html>
 <html><head>
     <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js"></script>
-    <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+    <script src="sockjs-0.3.js"></script>
     <script src="stomp.js"></script>
 
     <style>
index 752e18f0196c3a1069cef9a42ef0b942f25c7967..2119dfd0f6006a818a3891aa6911f777fb1fbbb7 100644 (file)
@@ -1,7 +1,7 @@
 <!DOCTYPE html>
 <html><head>
   <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.2/jquery.min.js"></script>
-  <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+  <script src="sockjs-0.3.js"></script>
   <script src="stomp.js"></script>
   <style>
       .box {
diff --git a/rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/priv/sockjs-0.3.js b/rabbitmq-server/plugins-src/rabbitmq-web-stomp-examples/priv/sockjs-0.3.js
new file mode 100644 (file)
index 0000000..585215c
--- /dev/null
@@ -0,0 +1,2379 @@
+/* SockJS client, version 0.3.4, http://sockjs.org, MIT License
+
+Copyright (c) 2011-2012 VMware, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+// JSON2 by Douglas Crockford (minified).
+var JSON;JSON||(JSON={}),function(){function str(a,b){var c,d,e,f,g=gap,h,i=b[a];i&&typeof i=="object"&&typeof i.toJSON=="function"&&(i=i.toJSON(a)),typeof rep=="function"&&(i=rep.call(b,a,i));switch(typeof i){case"string":return quote(i);case"number":return isFinite(i)?String(i):"null";case"boolean":case"null":return String(i);case"object":if(!i)return"null";gap+=indent,h=[];if(Object.prototype.toString.apply(i)==="[object Array]"){f=i.length;for(c=0;c<f;c+=1)h[c]=str(c,i)||"null";e=h.length===0?"[]":gap?"[\n"+gap+h.join(",\n"+gap)+"\n"+g+"]":"["+h.join(",")+"]",gap=g;return e}if(rep&&typeof rep=="object"){f=rep.length;for(c=0;c<f;c+=1)typeof rep[c]=="string"&&(d=rep[c],e=str(d,i),e&&h.push(quote(d)+(gap?": ":":")+e))}else for(d in i)Object.prototype.hasOwnProperty.call(i,d)&&(e=str(d,i),e&&h.push(quote(d)+(gap?": ":":")+e));e=h.length===0?"{}":gap?"{\n"+gap+h.join(",\n"+gap)+"\n"+g+"}":"{"+h.join(",")+"}",gap=g;return e}}function quote(a){escapable.lastIndex=0;return escapable.test(a)?'"'+a.replace(escapable,function(a){var b=meta[a];return typeof b=="string"?b:"\\u"+("0000"+a.charCodeAt(0).toString(16)).slice(-4)})+'"':'"'+a+'"'}function f(a){return a<10?"0"+a:a}"use strict",typeof Date.prototype.toJSON!="function"&&(Date.prototype.toJSON=function(a){return isFinite(this.valueOf())?this.getUTCFullYear()+"-"+f(this.getUTCMonth()+1)+"-"+f(this.getUTCDate())+"T"+f(this.getUTCHours())+":"+f(this.getUTCMinutes())+":"+f(this.getUTCSeconds())+"Z":null},String.prototype.toJSON=Number.prototype.toJSON=Boolean.prototype.toJSON=function(a){return this.valueOf()});var cx=/[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,escapable=/[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,gap,indent,meta={"\b":"\\b","\t":"\\t","\n":"\\n","\f":"\\f","\r":"\\r",'"':'\\"',"\\":"\\\\"},rep;typeof JSON.stringify!="function"&&(JSON.stringify=function(a,b,c){var d;gap="",indent="";if(typeof c=="number")for(d=0;d<c;d+=1)indent+=" ";else typeof c=="string"&&(indent=c);rep=b;if(!b||typeof b=="function"||typeof b=="object"&&typeof b.length=="number")return str("",{"":a});throw new Error("JSON.stringify")}),typeof JSON.parse!="function"&&(JSON.parse=function(text,reviver){function walk(a,b){var c,d,e=a[b];if(e&&typeof e=="object")for(c in e)Object.prototype.hasOwnProperty.call(e,c)&&(d=walk(e,c),d!==undefined?e[c]=d:delete e[c]);return reviver.call(a,b,e)}var j;text=String(text),cx.lastIndex=0,cx.test(text)&&(text=text.replace(cx,function(a){return"\\u"+("0000"+a.charCodeAt(0).toString(16)).slice(-4)}));if(/^[\],:{}\s]*$/.test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,"@").replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,"]").replace(/(?:^|:|,)(?:\s*\[)+/g,""))){j=eval("("+text+")");return typeof reviver=="function"?walk({"":j},""):j}throw new SyntaxError("JSON.parse")})}()
+
+
+//     [*] Including lib/index.js
+// Public object
+SockJS = (function(){
+              var _document = document;
+              var _window = window;
+              var utils = {};
+
+
+//         [*] Including lib/reventtarget.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+/* Simplified implementation of DOM2 EventTarget.
+ *   http://www.w3.org/TR/DOM-Level-2-Events/events.html#Events-EventTarget
+ */
+var REventTarget = function() {};
+REventTarget.prototype.addEventListener = function (eventType, listener) {
+    if(!this._listeners) {
+         this._listeners = {};
+    }
+    if(!(eventType in this._listeners)) {
+        this._listeners[eventType] = [];
+    }
+    var arr = this._listeners[eventType];
+    if(utils.arrIndexOf(arr, listener) === -1) {
+        arr.push(listener);
+    }
+    return;
+};
+
+REventTarget.prototype.removeEventListener = function (eventType, listener) {
+    if(!(this._listeners && (eventType in this._listeners))) {
+        return;
+    }
+    var arr = this._listeners[eventType];
+    var idx = utils.arrIndexOf(arr, listener);
+    if (idx !== -1) {
+        if(arr.length > 1) {
+            this._listeners[eventType] = arr.slice(0, idx).concat( arr.slice(idx+1) );
+        } else {
+            delete this._listeners[eventType];
+        }
+        return;
+    }
+    return;
+};
+
+REventTarget.prototype.dispatchEvent = function (event) {
+    var t = event.type;
+    var args = Array.prototype.slice.call(arguments, 0);
+    if (this['on'+t]) {
+        this['on'+t].apply(this, args);
+    }
+    if (this._listeners && t in this._listeners) {
+        for(var i=0; i < this._listeners[t].length; i++) {
+            this._listeners[t][i].apply(this, args);
+        }
+    }
+};
+//         [*] End of lib/reventtarget.js
+
+
+//         [*] Including lib/simpleevent.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var SimpleEvent = function(type, obj) {
+    this.type = type;
+    if (typeof obj !== 'undefined') {
+        for(var k in obj) {
+            if (!obj.hasOwnProperty(k)) continue;
+            this[k] = obj[k];
+        }
+    }
+};
+
+SimpleEvent.prototype.toString = function() {
+    var r = [];
+    for(var k in this) {
+        if (!this.hasOwnProperty(k)) continue;
+        var v = this[k];
+        if (typeof v === 'function') v = '[function]';
+        r.push(k + '=' + v);
+    }
+    return 'SimpleEvent(' + r.join(', ') + ')';
+};
+//         [*] End of lib/simpleevent.js
+
+
+//         [*] Including lib/eventemitter.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var EventEmitter = function(events) {
+    var that = this;
+    that._events = events || [];
+    that._listeners = {};
+};
+EventEmitter.prototype.emit = function(type) {
+    var that = this;
+    that._verifyType(type);
+    if (that._nuked) return;
+
+    var args = Array.prototype.slice.call(arguments, 1);
+    if (that['on'+type]) {
+        that['on'+type].apply(that, args);
+    }
+    if (type in that._listeners) {
+        for(var i = 0; i < that._listeners[type].length; i++) {
+            that._listeners[type][i].apply(that, args);
+        }
+    }
+};
+
+EventEmitter.prototype.on = function(type, callback) {
+    var that = this;
+    that._verifyType(type);
+    if (that._nuked) return;
+
+    if (!(type in that._listeners)) {
+        that._listeners[type] = [];
+    }
+    that._listeners[type].push(callback);
+};
+
+EventEmitter.prototype._verifyType = function(type) {
+    var that = this;
+    if (utils.arrIndexOf(that._events, type) === -1) {
+        utils.log('Event ' + JSON.stringify(type) +
+                  ' not listed ' + JSON.stringify(that._events) +
+                  ' in ' + that);
+    }
+};
+
+EventEmitter.prototype.nuke = function() {
+    var that = this;
+    that._nuked = true;
+    for(var i=0; i<that._events.length; i++) {
+        delete that[that._events[i]];
+    }
+    that._listeners = {};
+};
+//         [*] End of lib/eventemitter.js
+
+
+//         [*] Including lib/utils.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var random_string_chars = 'abcdefghijklmnopqrstuvwxyz0123456789_';
+utils.random_string = function(length, max) {
+    max = max || random_string_chars.length;
+    var i, ret = [];
+    for(i=0; i < length; i++) {
+        ret.push( random_string_chars.substr(Math.floor(Math.random() * max),1) );
+    }
+    return ret.join('');
+};
+utils.random_number = function(max) {
+    return Math.floor(Math.random() * max);
+};
+utils.random_number_string = function(max) {
+    var t = (''+(max - 1)).length;
+    var p = Array(t+1).join('0');
+    return (p + utils.random_number(max)).slice(-t);
+};
+
+// Assuming that url looks like: http://asdasd:111/asd
+utils.getOrigin = function(url) {
+    url += '/';
+    var parts = url.split('/').slice(0, 3);
+    return parts.join('/');
+};
+
+utils.isSameOriginUrl = function(url_a, url_b) {
+    // location.origin would do, but it's not always available.
+    if (!url_b) url_b = _window.location.href;
+
+    return (url_a.split('/').slice(0,3).join('/')
+                ===
+            url_b.split('/').slice(0,3).join('/'));
+};
+
+utils.getParentDomain = function(url) {
+    // ipv4 ip address
+    if (/^[0-9.]*$/.test(url)) return url;
+    // ipv6 ip address
+    if (/^\[/.test(url)) return url;
+    // no dots
+    if (!(/[.]/.test(url))) return url;
+
+    var parts = url.split('.').slice(1);
+    return parts.join('.');
+};
+
+utils.objectExtend = function(dst, src) {
+    for(var k in src) {
+        if (src.hasOwnProperty(k)) {
+            dst[k] = src[k];
+        }
+    }
+    return dst;
+};
+
+var WPrefix = '_jp';
+
+utils.polluteGlobalNamespace = function() {
+    if (!(WPrefix in _window)) {
+        _window[WPrefix] = {};
+    }
+};
+
+utils.closeFrame = function (code, reason) {
+    return 'c'+JSON.stringify([code, reason]);
+};
+
+utils.userSetCode = function (code) {
+    return code === 1000 || (code >= 3000 && code <= 4999);
+};
+
+// See: http://www.erg.abdn.ac.uk/~gerrit/dccp/notes/ccid2/rto_estimator/
+// and RFC 2988.
+utils.countRTO = function (rtt) {
+    var rto;
+    if (rtt > 100) {
+        rto = 3 * rtt; // rto > 300msec
+    } else {
+        rto = rtt + 200; // 200msec < rto <= 300msec
+    }
+    return rto;
+}
+
+utils.log = function() {
+    if (_window.console && console.log && console.log.apply) {
+        console.log.apply(console, arguments);
+    }
+};
+
+utils.bind = function(fun, that) {
+    if (fun.bind) {
+        return fun.bind(that);
+    } else {
+        return function() {
+            return fun.apply(that, arguments);
+        };
+    }
+};
+
+utils.flatUrl = function(url) {
+    return url.indexOf('?') === -1 && url.indexOf('#') === -1;
+};
+
+utils.amendUrl = function(url) {
+    var dl = _document.location;
+    if (!url) {
+        throw new Error('Wrong url for SockJS');
+    }
+    if (!utils.flatUrl(url)) {
+        throw new Error('Only basic urls are supported in SockJS');
+    }
+
+    //  '//abc' --> 'http://abc'
+    if (url.indexOf('//') === 0) {
+        url = dl.protocol + url;
+    }
+    // '/abc' --> 'http://localhost:80/abc'
+    if (url.indexOf('/') === 0) {
+        url = dl.protocol + '//' + dl.host + url;
+    }
+    // strip trailing slashes
+    url = url.replace(/[/]+$/,'');
+    return url;
+};
+
+// IE doesn't support [].indexOf.
+utils.arrIndexOf = function(arr, obj){
+    for(var i=0; i < arr.length; i++){
+        if(arr[i] === obj){
+            return i;
+        }
+    }
+    return -1;
+};
+
+utils.arrSkip = function(arr, obj) {
+    var idx = utils.arrIndexOf(arr, obj);
+    if (idx === -1) {
+        return arr.slice();
+    } else {
+        var dst = arr.slice(0, idx);
+        return dst.concat(arr.slice(idx+1));
+    }
+};
+
+// Via: https://gist.github.com/1133122/2121c601c5549155483f50be3da5305e83b8c5df
+utils.isArray = Array.isArray || function(value) {
+    return {}.toString.call(value).indexOf('Array') >= 0
+};
+
+utils.delay = function(t, fun) {
+    if(typeof t === 'function') {
+        fun = t;
+        t = 0;
+    }
+    return setTimeout(fun, t);
+};
+
+
+// Chars worth escaping, as defined by Douglas Crockford:
+//   https://github.com/douglascrockford/JSON-js/blob/47a9882cddeb1e8529e07af9736218075372b8ac/json2.js#L196
+var json_escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+    json_lookup = {
+"\u0000":"\\u0000","\u0001":"\\u0001","\u0002":"\\u0002","\u0003":"\\u0003",
+"\u0004":"\\u0004","\u0005":"\\u0005","\u0006":"\\u0006","\u0007":"\\u0007",
+"\b":"\\b","\t":"\\t","\n":"\\n","\u000b":"\\u000b","\f":"\\f","\r":"\\r",
+"\u000e":"\\u000e","\u000f":"\\u000f","\u0010":"\\u0010","\u0011":"\\u0011",
+"\u0012":"\\u0012","\u0013":"\\u0013","\u0014":"\\u0014","\u0015":"\\u0015",
+"\u0016":"\\u0016","\u0017":"\\u0017","\u0018":"\\u0018","\u0019":"\\u0019",
+"\u001a":"\\u001a","\u001b":"\\u001b","\u001c":"\\u001c","\u001d":"\\u001d",
+"\u001e":"\\u001e","\u001f":"\\u001f","\"":"\\\"","\\":"\\\\",
+"\u007f":"\\u007f","\u0080":"\\u0080","\u0081":"\\u0081","\u0082":"\\u0082",
+"\u0083":"\\u0083","\u0084":"\\u0084","\u0085":"\\u0085","\u0086":"\\u0086",
+"\u0087":"\\u0087","\u0088":"\\u0088","\u0089":"\\u0089","\u008a":"\\u008a",
+"\u008b":"\\u008b","\u008c":"\\u008c","\u008d":"\\u008d","\u008e":"\\u008e",
+"\u008f":"\\u008f","\u0090":"\\u0090","\u0091":"\\u0091","\u0092":"\\u0092",
+"\u0093":"\\u0093","\u0094":"\\u0094","\u0095":"\\u0095","\u0096":"\\u0096",
+"\u0097":"\\u0097","\u0098":"\\u0098","\u0099":"\\u0099","\u009a":"\\u009a",
+"\u009b":"\\u009b","\u009c":"\\u009c","\u009d":"\\u009d","\u009e":"\\u009e",
+"\u009f":"\\u009f","\u00ad":"\\u00ad","\u0600":"\\u0600","\u0601":"\\u0601",
+"\u0602":"\\u0602","\u0603":"\\u0603","\u0604":"\\u0604","\u070f":"\\u070f",
+"\u17b4":"\\u17b4","\u17b5":"\\u17b5","\u200c":"\\u200c","\u200d":"\\u200d",
+"\u200e":"\\u200e","\u200f":"\\u200f","\u2028":"\\u2028","\u2029":"\\u2029",
+"\u202a":"\\u202a","\u202b":"\\u202b","\u202c":"\\u202c","\u202d":"\\u202d",
+"\u202e":"\\u202e","\u202f":"\\u202f","\u2060":"\\u2060","\u2061":"\\u2061",
+"\u2062":"\\u2062","\u2063":"\\u2063","\u2064":"\\u2064","\u2065":"\\u2065",
+"\u2066":"\\u2066","\u2067":"\\u2067","\u2068":"\\u2068","\u2069":"\\u2069",
+"\u206a":"\\u206a","\u206b":"\\u206b","\u206c":"\\u206c","\u206d":"\\u206d",
+"\u206e":"\\u206e","\u206f":"\\u206f","\ufeff":"\\ufeff","\ufff0":"\\ufff0",
+"\ufff1":"\\ufff1","\ufff2":"\\ufff2","\ufff3":"\\ufff3","\ufff4":"\\ufff4",
+"\ufff5":"\\ufff5","\ufff6":"\\ufff6","\ufff7":"\\ufff7","\ufff8":"\\ufff8",
+"\ufff9":"\\ufff9","\ufffa":"\\ufffa","\ufffb":"\\ufffb","\ufffc":"\\ufffc",
+"\ufffd":"\\ufffd","\ufffe":"\\ufffe","\uffff":"\\uffff"};
+
+// Some extra characters that Chrome gets wrong, and substitutes with
+// something else on the wire.
+var extra_escapable = /[\x00-\x1f\ud800-\udfff\ufffe\uffff\u0300-\u0333\u033d-\u0346\u034a-\u034c\u0350-\u0352\u0357-\u0358\u035c-\u0362\u0374\u037e\u0387\u0591-\u05af\u05c4\u0610-\u0617\u0653-\u0654\u0657-\u065b\u065d-\u065e\u06df-\u06e2\u06eb-\u06ec\u0730\u0732-\u0733\u0735-\u0736\u073a\u073d\u073f-\u0741\u0743\u0745\u0747\u07eb-\u07f1\u0951\u0958-\u095f\u09dc-\u09dd\u09df\u0a33\u0a36\u0a59-\u0a5b\u0a5e\u0b5c-\u0b5d\u0e38-\u0e39\u0f43\u0f4d\u0f52\u0f57\u0f5c\u0f69\u0f72-\u0f76\u0f78\u0f80-\u0f83\u0f93\u0f9d\u0fa2\u0fa7\u0fac\u0fb9\u1939-\u193a\u1a17\u1b6b\u1cda-\u1cdb\u1dc0-\u1dcf\u1dfc\u1dfe\u1f71\u1f73\u1f75\u1f77\u1f79\u1f7b\u1f7d\u1fbb\u1fbe\u1fc9\u1fcb\u1fd3\u1fdb\u1fe3\u1feb\u1fee-\u1fef\u1ff9\u1ffb\u1ffd\u2000-\u2001\u20d0-\u20d1\u20d4-\u20d7\u20e7-\u20e9\u2126\u212a-\u212b\u2329-\u232a\u2adc\u302b-\u302c\uaab2-\uaab3\uf900-\ufa0d\ufa10\ufa12\ufa15-\ufa1e\ufa20\ufa22\ufa25-\ufa26\ufa2a-\ufa2d\ufa30-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufb4e\ufff0-\uffff]/g,
+    extra_lookup;
+
+// JSON Quote string. Use native implementation when possible.
+var JSONQuote = (JSON && JSON.stringify) || function(string) {
+    json_escapable.lastIndex = 0;
+    if (json_escapable.test(string)) {
+        string = string.replace(json_escapable, function(a) {
+            return json_lookup[a];
+        });
+    }
+    return '"' + string + '"';
+};
+
+// This may be quite slow, so let's delay until user actually uses bad
+// characters.
+var unroll_lookup = function(escapable) {
+    var i;
+    var unrolled = {}
+    var c = []
+    for(i=0; i<65536; i++) {
+        c.push( String.fromCharCode(i) );
+    }
+    escapable.lastIndex = 0;
+    c.join('').replace(escapable, function (a) {
+        unrolled[ a ] = '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+        return '';
+    });
+    escapable.lastIndex = 0;
+    return unrolled;
+};
+
+// Quote string, also taking care of unicode characters that browsers
+// often break. Especially, take care of unicode surrogates:
+//    http://en.wikipedia.org/wiki/Mapping_of_Unicode_characters#Surrogates
+utils.quote = function(string) {
+    var quoted = JSONQuote(string);
+
+    // In most cases this should be very fast and good enough.
+    extra_escapable.lastIndex = 0;
+    if(!extra_escapable.test(quoted)) {
+        return quoted;
+    }
+
+    if(!extra_lookup) extra_lookup = unroll_lookup(extra_escapable);
+
+    return quoted.replace(extra_escapable, function(a) {
+        return extra_lookup[a];
+    });
+}
+
+var _all_protocols = ['websocket',
+                      'xdr-streaming',
+                      'xhr-streaming',
+                      'iframe-eventsource',
+                      'iframe-htmlfile',
+                      'xdr-polling',
+                      'xhr-polling',
+                      'iframe-xhr-polling',
+                      'jsonp-polling'];
+
+utils.probeProtocols = function() {
+    var probed = {};
+    for(var i=0; i<_all_protocols.length; i++) {
+        var protocol = _all_protocols[i];
+        // User can have a typo in protocol name.
+        probed[protocol] = SockJS[protocol] &&
+                           SockJS[protocol].enabled();
+    }
+    return probed;
+};
+
+utils.detectProtocols = function(probed, protocols_whitelist, info) {
+    var pe = {},
+        protocols = [];
+    if (!protocols_whitelist) protocols_whitelist = _all_protocols;
+    for(var i=0; i<protocols_whitelist.length; i++) {
+        var protocol = protocols_whitelist[i];
+        pe[protocol] = probed[protocol];
+    }
+    var maybe_push = function(protos) {
+        var proto = protos.shift();
+        if (pe[proto]) {
+            protocols.push(proto);
+        } else {
+            if (protos.length > 0) {
+                maybe_push(protos);
+            }
+        }
+    }
+
+    // 1. Websocket
+    if (info.websocket !== false) {
+        maybe_push(['websocket']);
+    }
+
+    // 2. Streaming
+    if (pe['xhr-streaming'] && !info.null_origin) {
+        protocols.push('xhr-streaming');
+    } else {
+        if (pe['xdr-streaming'] && !info.cookie_needed && !info.null_origin) {
+            protocols.push('xdr-streaming');
+        } else {
+            maybe_push(['iframe-eventsource',
+                        'iframe-htmlfile']);
+        }
+    }
+
+    // 3. Polling
+    if (pe['xhr-polling'] && !info.null_origin) {
+        protocols.push('xhr-polling');
+    } else {
+        if (pe['xdr-polling'] && !info.cookie_needed && !info.null_origin) {
+            protocols.push('xdr-polling');
+        } else {
+            maybe_push(['iframe-xhr-polling',
+                        'jsonp-polling']);
+        }
+    }
+    return protocols;
+}
+//         [*] End of lib/utils.js
+
+
+//         [*] Including lib/dom.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+// May be used by htmlfile jsonp and transports.
+var MPrefix = '_sockjs_global';
+utils.createHook = function() {
+    var window_id = 'a' + utils.random_string(8);
+    if (!(MPrefix in _window)) {
+        var map = {};
+        _window[MPrefix] = function(window_id) {
+            if (!(window_id in map)) {
+                map[window_id] = {
+                    id: window_id,
+                    del: function() {delete map[window_id];}
+                };
+            }
+            return map[window_id];
+        }
+    }
+    return _window[MPrefix](window_id);
+};
+
+
+
+utils.attachMessage = function(listener) {
+    utils.attachEvent('message', listener);
+};
+utils.attachEvent = function(event, listener) {
+    if (typeof _window.addEventListener !== 'undefined') {
+        _window.addEventListener(event, listener, false);
+    } else {
+        // IE quirks.
+        // According to: http://stevesouders.com/misc/test-postmessage.php
+        // the message gets delivered only to 'document', not 'window'.
+        _document.attachEvent("on" + event, listener);
+        // I get 'window' for ie8.
+        _window.attachEvent("on" + event, listener);
+    }
+};
+
+utils.detachMessage = function(listener) {
+    utils.detachEvent('message', listener);
+};
+utils.detachEvent = function(event, listener) {
+    if (typeof _window.addEventListener !== 'undefined') {
+        _window.removeEventListener(event, listener, false);
+    } else {
+        _document.detachEvent("on" + event, listener);
+        _window.detachEvent("on" + event, listener);
+    }
+};
+
+
+var on_unload = {};
+// Things registered after beforeunload are to be called immediately.
+var after_unload = false;
+
+var trigger_unload_callbacks = function() {
+    for(var ref in on_unload) {
+        on_unload[ref]();
+        delete on_unload[ref];
+    };
+};
+
+var unload_triggered = function() {
+    if(after_unload) return;
+    after_unload = true;
+    trigger_unload_callbacks();
+};
+
+// 'unload' alone is not reliable in opera within an iframe, but we
+// can't use `beforeunload` as IE fires it on javascript: links.
+utils.attachEvent('unload', unload_triggered);
+
+utils.unload_add = function(listener) {
+    var ref = utils.random_string(8);
+    on_unload[ref] = listener;
+    if (after_unload) {
+        utils.delay(trigger_unload_callbacks);
+    }
+    return ref;
+};
+utils.unload_del = function(ref) {
+    if (ref in on_unload)
+        delete on_unload[ref];
+};
+
+
+utils.createIframe = function (iframe_url, error_callback) {
+    var iframe = _document.createElement('iframe');
+    var tref, unload_ref;
+    var unattach = function() {
+        clearTimeout(tref);
+        // Explorer had problems with that.
+        try {iframe.onload = null;} catch (x) {}
+        iframe.onerror = null;
+    };
+    var cleanup = function() {
+        if (iframe) {
+            unattach();
+            // This timeout makes chrome fire onbeforeunload event
+            // within iframe. Without the timeout it goes straight to
+            // onunload.
+            setTimeout(function() {
+                if(iframe) {
+                    iframe.parentNode.removeChild(iframe);
+                }
+                iframe = null;
+            }, 0);
+            utils.unload_del(unload_ref);
+        }
+    };
+    var onerror = function(r) {
+        if (iframe) {
+            cleanup();
+            error_callback(r);
+        }
+    };
+    var post = function(msg, origin) {
+        try {
+            // When the iframe is not loaded, IE raises an exception
+            // on 'contentWindow'.
+            if (iframe && iframe.contentWindow) {
+                iframe.contentWindow.postMessage(msg, origin);
+            }
+        } catch (x) {};
+    };
+
+    iframe.src = iframe_url;
+    iframe.style.display = 'none';
+    iframe.style.position = 'absolute';
+    iframe.onerror = function(){onerror('onerror');};
+    iframe.onload = function() {
+        // `onload` is triggered before scripts on the iframe are
+        // executed. Give it few seconds to actually load stuff.
+        clearTimeout(tref);
+        tref = setTimeout(function(){onerror('onload timeout');}, 2000);
+    };
+    _document.body.appendChild(iframe);
+    tref = setTimeout(function(){onerror('timeout');}, 15000);
+    unload_ref = utils.unload_add(cleanup);
+    return {
+        post: post,
+        cleanup: cleanup,
+        loaded: unattach
+    };
+};
+
+utils.createHtmlfile = function (iframe_url, error_callback) {
+    var doc = new ActiveXObject('htmlfile');
+    var tref, unload_ref;
+    var iframe;
+    var unattach = function() {
+        clearTimeout(tref);
+    };
+    var cleanup = function() {
+        if (doc) {
+            unattach();
+            utils.unload_del(unload_ref);
+            iframe.parentNode.removeChild(iframe);
+            iframe = doc = null;
+            CollectGarbage();
+        }
+    };
+    var onerror = function(r)  {
+        if (doc) {
+            cleanup();
+            error_callback(r);
+        }
+    };
+    var post = function(msg, origin) {
+        try {
+            // When the iframe is not loaded, IE raises an exception
+            // on 'contentWindow'.
+            if (iframe && iframe.contentWindow) {
+                iframe.contentWindow.postMessage(msg, origin);
+            }
+        } catch (x) {};
+    };
+
+    doc.open();
+    doc.write('<html><s' + 'cript>' +
+              'document.domain="' + document.domain + '";' +
+              '</s' + 'cript></html>');
+    doc.close();
+    doc.parentWindow[WPrefix] = _window[WPrefix];
+    var c = doc.createElement('div');
+    doc.body.appendChild(c);
+    iframe = doc.createElement('iframe');
+    c.appendChild(iframe);
+    iframe.src = iframe_url;
+    tref = setTimeout(function(){onerror('timeout');}, 15000);
+    unload_ref = utils.unload_add(cleanup);
+    return {
+        post: post,
+        cleanup: cleanup,
+        loaded: unattach
+    };
+};
+//         [*] End of lib/dom.js
+
+
+//         [*] Including lib/dom2.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var AbstractXHRObject = function(){};
+AbstractXHRObject.prototype = new EventEmitter(['chunk', 'finish']);
+
+AbstractXHRObject.prototype._start = function(method, url, payload, opts) {
+    var that = this;
+
+    try {
+        that.xhr = new XMLHttpRequest();
+    } catch(x) {};
+
+    if (!that.xhr) {
+        try {
+            that.xhr = new _window.ActiveXObject('Microsoft.XMLHTTP');
+        } catch(x) {};
+    }
+    if (_window.ActiveXObject || _window.XDomainRequest) {
+        // IE8 caches even POSTs
+        url += ((url.indexOf('?') === -1) ? '?' : '&') + 't='+(+new Date);
+    }
+
+    // Explorer tends to keep connection open, even after the
+    // tab gets closed: http://bugs.jquery.com/ticket/5280
+    that.unload_ref = utils.unload_add(function(){that._cleanup(true);});
+    try {
+        that.xhr.open(method, url, true);
+    } catch(e) {
+        // IE raises an exception on wrong port.
+        that.emit('finish', 0, '');
+        that._cleanup();
+        return;
+    };
+
+    if (!opts || !opts.no_credentials) {
+        // The Mozilla docs (https://developer.mozilla.org/en/XMLHttpRequest)
+        // say: "This never affects same-site requests."
+        that.xhr.withCredentials = 'true';
+    }
+    if (opts && opts.headers) {
+        for(var key in opts.headers) {
+            that.xhr.setRequestHeader(key, opts.headers[key]);
+        }
+    }
+
+    that.xhr.onreadystatechange = function() {
+        if (that.xhr) {
+            var x = that.xhr;
+            switch (x.readyState) {
+            case 3:
+                // IE doesn't like peeking into responseText or status
+                // on Microsoft.XMLHTTP and readystate=3
+                try {
+                    var status = x.status;
+                    var text = x.responseText;
+                } catch (x) {};
+                // IE returns 1223 for 204: http://bugs.jquery.com/ticket/1450
+                if (status === 1223) status = 204;
+
+                // IE does return readystate == 3 for 404 answers.
+                if (text && text.length > 0) {
+                    that.emit('chunk', status, text);
+                }
+                break;
+            case 4:
+                var status = x.status;
+                // IE returns 1223 for 204: http://bugs.jquery.com/ticket/1450
+                if (status === 1223) status = 204;
+
+                that.emit('finish', status, x.responseText);
+                that._cleanup(false);
+                break;
+            }
+        }
+    };
+    that.xhr.send(payload);
+};
+
+AbstractXHRObject.prototype._cleanup = function(abort) {
+    var that = this;
+    if (!that.xhr) return;
+    utils.unload_del(that.unload_ref);
+
+    // IE needs this field to be a function
+    that.xhr.onreadystatechange = function(){};
+
+    if (abort) {
+        try {
+            that.xhr.abort();
+        } catch(x) {};
+    }
+    that.unload_ref = that.xhr = null;
+};
+
+AbstractXHRObject.prototype.close = function() {
+    var that = this;
+    that.nuke();
+    that._cleanup(true);
+};
+
+var XHRCorsObject = utils.XHRCorsObject = function() {
+    var that = this, args = arguments;
+    utils.delay(function(){that._start.apply(that, args);});
+};
+XHRCorsObject.prototype = new AbstractXHRObject();
+
+var XHRLocalObject = utils.XHRLocalObject = function(method, url, payload) {
+    var that = this;
+    utils.delay(function(){
+        that._start(method, url, payload, {
+            no_credentials: true
+        });
+    });
+};
+XHRLocalObject.prototype = new AbstractXHRObject();
+
+
+
+// References:
+//   http://ajaxian.com/archives/100-line-ajax-wrapper
+//   http://msdn.microsoft.com/en-us/library/cc288060(v=VS.85).aspx
+var XDRObject = utils.XDRObject = function(method, url, payload) {
+    var that = this;
+    utils.delay(function(){that._start(method, url, payload);});
+};
+XDRObject.prototype = new EventEmitter(['chunk', 'finish']);
+XDRObject.prototype._start = function(method, url, payload) {
+    var that = this;
+    var xdr = new XDomainRequest();
+    // IE caches even POSTs
+    url += ((url.indexOf('?') === -1) ? '?' : '&') + 't='+(+new Date);
+
+    var onerror = xdr.ontimeout = xdr.onerror = function() {
+        that.emit('finish', 0, '');
+        that._cleanup(false);
+    };
+    xdr.onprogress = function() {
+        that.emit('chunk', 200, xdr.responseText);
+    };
+    xdr.onload = function() {
+        that.emit('finish', 200, xdr.responseText);
+        that._cleanup(false);
+    };
+    that.xdr = xdr;
+    that.unload_ref = utils.unload_add(function(){that._cleanup(true);});
+    try {
+        // Fails with AccessDenied if port number is bogus
+        that.xdr.open(method, url);
+        that.xdr.send(payload);
+    } catch(x) {
+        onerror();
+    }
+};
+
+XDRObject.prototype._cleanup = function(abort) {
+    var that = this;
+    if (!that.xdr) return;
+    utils.unload_del(that.unload_ref);
+
+    that.xdr.ontimeout = that.xdr.onerror = that.xdr.onprogress =
+        that.xdr.onload = null;
+    if (abort) {
+        try {
+            that.xdr.abort();
+        } catch(x) {};
+    }
+    that.unload_ref = that.xdr = null;
+};
+
+XDRObject.prototype.close = function() {
+    var that = this;
+    that.nuke();
+    that._cleanup(true);
+};
+
+// 1. Is natively via XHR
+// 2. Is natively via XDR
+// 3. Nope, but postMessage is there so it should work via the Iframe.
+// 4. Nope, sorry.
+utils.isXHRCorsCapable = function() {
+    if (_window.XMLHttpRequest && 'withCredentials' in new XMLHttpRequest()) {
+        return 1;
+    }
+    // XDomainRequest doesn't work if page is served from file://
+    if (_window.XDomainRequest && _document.domain) {
+        return 2;
+    }
+    if (IframeTransport.enabled()) {
+        return 3;
+    }
+    return 4;
+};
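+// A sketch of how the returned level might be acted upon (illustrative
+// only; the real consumer is createInfoReceiver in lib/info.js below):
+//
+//     switch (utils.isXHRCorsCapable()) {
+//     case 1:  /* CORS-capable XHR                  */ break;
+//     case 2:  /* XDomainRequest (IE 8/9)           */ break;
+//     case 3:  /* postMessage via an iframe         */ break;
+//     default: /* no cross-domain probing possible  */
+//     }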
+//         [*] End of lib/dom2.js
+
+
+//         [*] Including lib/sockjs.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var SockJS = function(url, dep_protocols_whitelist, options) {
+    if (this === _window) {
+        // makes `new` optional
+        return new SockJS(url, dep_protocols_whitelist, options);
+    }
+    
+    var that = this, protocols_whitelist;
+    that._options = {devel: false, debug: false, protocols_whitelist: [],
+                     info: undefined, rtt: undefined};
+    if (options) {
+        utils.objectExtend(that._options, options);
+    }
+    that._base_url = utils.amendUrl(url);
+    that._server = that._options.server || utils.random_number_string(1000);
+    if (that._options.protocols_whitelist &&
+        that._options.protocols_whitelist.length) {
+        protocols_whitelist = that._options.protocols_whitelist;
+    } else {
+        // Deprecated API
+        if (typeof dep_protocols_whitelist === 'string' &&
+            dep_protocols_whitelist.length > 0) {
+            protocols_whitelist = [dep_protocols_whitelist];
+        } else if (utils.isArray(dep_protocols_whitelist)) {
+            protocols_whitelist = dep_protocols_whitelist;
+        } else {
+            protocols_whitelist = null;
+        }
+        if (protocols_whitelist) {
+            that._debug('Deprecated API: Use "protocols_whitelist" option ' +
+                        'instead of supplying protocol list as a second ' +
+                        'parameter to SockJS constructor.');
+        }
+    }
+    that._protocols = [];
+    that.protocol = null;
+    that.readyState = SockJS.CONNECTING;
+    that._ir = createInfoReceiver(that._base_url);
+    that._ir.onfinish = function(info, rtt) {
+        that._ir = null;
+        if (info) {
+            if (that._options.info) {
+                // Override if user supplies the option
+                info = utils.objectExtend(info, that._options.info);
+            }
+            if (that._options.rtt) {
+                rtt = that._options.rtt;
+            }
+            that._applyInfo(info, rtt, protocols_whitelist);
+            that._didClose();
+        } else {
+            that._didClose(1002, 'Can\'t connect to server', true);
+        }
+    };
+};
+// Inheritance
+SockJS.prototype = new REventTarget();
+
+SockJS.version = "0.3.4";
+
+SockJS.CONNECTING = 0;
+SockJS.OPEN = 1;
+SockJS.CLOSING = 2;
+SockJS.CLOSED = 3;
+
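+// Example usage of the constructor above (illustrative only; the host,
+// port and path are hypothetical):
+//
+//     var sock = new SockJS('http://example.com:15674/stomp', null,
+//                           {protocols_whitelist: ['websocket', 'xhr-streaming']});
+//     sock.onopen    = function()  { sock.send('hello'); };
+//     sock.onmessage = function(e) { console.log('received', e.data); };
+//     sock.onclose   = function(e) { console.log('closed', e.code, e.reason); };
+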
+SockJS.prototype._debug = function() {
+    if (this._options.debug)
+        utils.log.apply(utils, arguments);
+};
+
+SockJS.prototype._dispatchOpen = function() {
+    var that = this;
+    if (that.readyState === SockJS.CONNECTING) {
+        if (that._transport_tref) {
+            clearTimeout(that._transport_tref);
+            that._transport_tref = null;
+        }
+        that.readyState = SockJS.OPEN;
+        that.dispatchEvent(new SimpleEvent("open"));
+    } else {
+        // The server might have been restarted, and lost track of our
+        // connection.
+        that._didClose(1006, "Server lost session");
+    }
+};
+
+SockJS.prototype._dispatchMessage = function(data) {
+    var that = this;
+    if (that.readyState !== SockJS.OPEN)
+            return;
+    that.dispatchEvent(new SimpleEvent("message", {data: data}));
+};
+
+SockJS.prototype._dispatchHeartbeat = function(data) {
+    var that = this;
+    if (that.readyState !== SockJS.OPEN)
+        return;
+    that.dispatchEvent(new SimpleEvent('heartbeat', {}));
+};
+
+SockJS.prototype._didClose = function(code, reason, force) {
+    var that = this;
+    if (that.readyState !== SockJS.CONNECTING &&
+        that.readyState !== SockJS.OPEN &&
+        that.readyState !== SockJS.CLOSING)
+            throw new Error('INVALID_STATE_ERR');
+    if (that._ir) {
+        that._ir.nuke();
+        that._ir = null;
+    }
+
+    if (that._transport) {
+        that._transport.doCleanup();
+        that._transport = null;
+    }
+
+    var close_event = new SimpleEvent("close", {
+        code: code,
+        reason: reason,
+        wasClean: utils.userSetCode(code)});
+
+    if (!utils.userSetCode(code) &&
+        that.readyState === SockJS.CONNECTING && !force) {
+        if (that._try_next_protocol(close_event)) {
+            return;
+        }
+        close_event = new SimpleEvent("close", {code: 2000,
+                                                reason: "All transports failed",
+                                                wasClean: false,
+                                                last_event: close_event});
+    }
+    that.readyState = SockJS.CLOSED;
+
+    utils.delay(function() {
+                   that.dispatchEvent(close_event);
+                });
+};
+
+SockJS.prototype._didMessage = function(data) {
+    var that = this;
+    var type = data.slice(0, 1);
+    switch(type) {
+    case 'o':
+        that._dispatchOpen();
+        break;
+    case 'a':
+        var payload = JSON.parse(data.slice(1) || '[]');
+        for(var i=0; i < payload.length; i++){
+            that._dispatchMessage(payload[i]);
+        }
+        break;
+    case 'm':
+        var payload = JSON.parse(data.slice(1) || 'null');
+        that._dispatchMessage(payload);
+        break;
+    case 'c':
+        var payload = JSON.parse(data.slice(1) || '[]');
+        that._didClose(payload[0], payload[1]);
+        break;
+    case 'h':
+        that._dispatchHeartbeat();
+        break;
+    }
+};
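+// The frames handled above look roughly like this on the wire
+// (example payloads only):
+//
+//     'o'                    - open frame
+//     'h'                    - heartbeat frame
+//     'a["msg1","msg2"]'     - array of messages
+//     'm"single message"'    - single message
+//     'c[3000,"Go away!"]'   - close frame with code and reason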
+
+SockJS.prototype._try_next_protocol = function(close_event) {
+    var that = this;
+    if (that.protocol) {
+        that._debug('Closed transport:', that.protocol, ''+close_event);
+        that.protocol = null;
+    }
+    if (that._transport_tref) {
+        clearTimeout(that._transport_tref);
+        that._transport_tref = null;
+    }
+
+    while(1) {
+        var protocol = that.protocol = that._protocols.shift();
+        if (!protocol) {
+            return false;
+        }
+        // Some protocols require access to `body`; what if we're still in
+        // the `head`?
+        if (SockJS[protocol] &&
+            SockJS[protocol].need_body === true &&
+            (!_document.body ||
+             (typeof _document.readyState !== 'undefined'
+              && _document.readyState !== 'complete'))) {
+            that._protocols.unshift(protocol);
+            that.protocol = 'waiting-for-load';
+            utils.attachEvent('load', function(){
+                that._try_next_protocol();
+            });
+            return true;
+        }
+
+        if (!SockJS[protocol] ||
+              !SockJS[protocol].enabled(that._options)) {
+            that._debug('Skipping transport:', protocol);
+        } else {
+            var roundTrips = SockJS[protocol].roundTrips || 1;
+            var to = ((that._options.rto || 0) * roundTrips) || 5000;
+            that._transport_tref = utils.delay(to, function() {
+                if (that.readyState === SockJS.CONNECTING) {
+                    // It should not be possible for this timer to run
+                    // when the state is CLOSED, but apparently in IE
+                    // everything is possible.
+                    that._didClose(2007, "Transport timeouted");
+                }
+            });
+
+            var connid = utils.random_string(8);
+            var trans_url = that._base_url + '/' + that._server + '/' + connid;
+            that._debug('Opening transport:', protocol, ' url:'+trans_url,
+                        ' RTO:'+that._options.rto);
+            that._transport = new SockJS[protocol](that, trans_url,
+                                                   that._base_url);
+            return true;
+        }
+    }
+};
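+// Worked example of the timeout computed above: with rto = 1500ms
+// (derived from the measured RTT) and a transport declaring
+// roundTrips = 2, the connect timeout is 1500 * 2 = 3000ms; if rto is
+// not known yet, the 5000ms fallback is used.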
+
+SockJS.prototype.close = function(code, reason) {
+    var that = this;
+    if (code && !utils.userSetCode(code))
+        throw new Error("INVALID_ACCESS_ERR");
+    if(that.readyState !== SockJS.CONNECTING &&
+       that.readyState !== SockJS.OPEN) {
+        return false;
+    }
+    that.readyState = SockJS.CLOSING;
+    that._didClose(code || 1000, reason || "Normal closure");
+    return true;
+};
+
+SockJS.prototype.send = function(data) {
+    var that = this;
+    if (that.readyState === SockJS.CONNECTING)
+        throw new Error('INVALID_STATE_ERR');
+    if (that.readyState === SockJS.OPEN) {
+        that._transport.doSend(utils.quote('' + data));
+    }
+    return true;
+};
+
+SockJS.prototype._applyInfo = function(info, rtt, protocols_whitelist) {
+    var that = this;
+    that._options.info = info;
+    that._options.rtt = rtt;
+    that._options.rto = utils.countRTO(rtt);
+    that._options.info.null_origin = !_document.domain;
+    var probed = utils.probeProtocols();
+    that._protocols = utils.detectProtocols(probed, protocols_whitelist, info);
+};
+//         [*] End of lib/sockjs.js
+
+
+//         [*] Including lib/trans-websocket.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var WebSocketTransport = SockJS.websocket = function(ri, trans_url) {
+    var that = this;
+    var url = trans_url + '/websocket';
+    if (url.slice(0, 5) === 'https') {
+        url = 'wss' + url.slice(5);
+    } else {
+        url = 'ws' + url.slice(4);
+    }
+    that.ri = ri;
+    that.url = url;
+    var Constructor = _window.WebSocket || _window.MozWebSocket;
+
+    that.ws = new Constructor(that.url);
+    that.ws.onmessage = function(e) {
+        that.ri._didMessage(e.data);
+    };
+    // Firefox has an interesting bug. If a websocket connection is
+    // created after onunload, it stays alive even when user
+// navigates away from the page. In such a situation let's lie -
+    // let's not open the ws connection at all. See:
+    // https://github.com/sockjs/sockjs-client/issues/28
+    // https://bugzilla.mozilla.org/show_bug.cgi?id=696085
+    that.unload_ref = utils.unload_add(function(){that.ws.close()});
+    that.ws.onclose = function() {
+        that.ri._didMessage(utils.closeFrame(1006, "WebSocket connection broken"));
+    };
+};
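+// Example of the URL rewriting above (hypothetical transport URL):
+//
+//     trans_url 'https://example.com/stomp/444/abcd1234'
+//       becomes 'wss://example.com/stomp/444/abcd1234/websocket'
+//
+// and 'http://...' becomes 'ws://.../websocket' in the same way.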
+
+WebSocketTransport.prototype.doSend = function(data) {
+    this.ws.send('[' + data + ']');
+};
+
+WebSocketTransport.prototype.doCleanup = function() {
+    var that = this;
+    var ws = that.ws;
+    if (ws) {
+        ws.onmessage = ws.onclose = null;
+        ws.close();
+        utils.unload_del(that.unload_ref);
+        that.unload_ref = that.ri = that.ws = null;
+    }
+};
+
+WebSocketTransport.enabled = function() {
+    return !!(_window.WebSocket || _window.MozWebSocket);
+};
+
+// In theory, ws should require 1 round trip. But in Chrome, this is
+// not very stable over SSL. Most likely a ws connection requires a
+// separate SSL connection, in which case 2 round trips are an
+// absolute minimum.
+WebSocketTransport.roundTrips = 2;
+//         [*] End of lib/trans-websocket.js
+
+
+//         [*] Including lib/trans-sender.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var BufferedSender = function() {};
+BufferedSender.prototype.send_constructor = function(sender) {
+    var that = this;
+    that.send_buffer = [];
+    that.sender = sender;
+};
+BufferedSender.prototype.doSend = function(message) {
+    var that = this;
+    that.send_buffer.push(message);
+    if (!that.send_stop) {
+        that.send_schedule();
+    }
+};
+
+// For polling transports: if a new message is sent from within the
+// message callback, and the sending connection is started before the
+// receiving one, it is possible to saturate the network and time out
+// due to the lack of a receiving socket. To avoid that, we delay
+// sending messages by a small amount of time, so that the receiving
+// connection can be started first. This is only a half-measure and
+// does not fix the underlying problem, but it does make the tests
+// more stable on slow networks.
+BufferedSender.prototype.send_schedule_wait = function() {
+    var that = this;
+    var tref;
+    that.send_stop = function() {
+        that.send_stop = null;
+        clearTimeout(tref);
+    };
+    tref = utils.delay(25, function() {
+        that.send_stop = null;
+        that.send_schedule();
+    });
+};
+
+BufferedSender.prototype.send_schedule = function() {
+    var that = this;
+    if (that.send_buffer.length > 0) {
+        var payload = '[' + that.send_buffer.join(',') + ']';
+        that.send_stop = that.sender(that.trans_url, payload, function(success, abort_reason) {
+            that.send_stop = null;
+            if (success === false) {
+                that.ri._didClose(1006, 'Sending error ' + abort_reason);
+            } else {
+                that.send_schedule_wait();
+            }
+        });
+        that.send_buffer = [];
+    }
+};
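+// Example of the payload built above: if the buffer holds the quoted
+// strings '"a"' and '"b"' when send_schedule runs (e.g. both were
+// queued while a previous send was still in flight), the POSTed
+// payload is '["a","b"]'.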
+
+BufferedSender.prototype.send_destructor = function() {
+    var that = this;
+    if (that._send_stop) {
+        that._send_stop();
+    }
+    that._send_stop = null;
+};
+
+var jsonPGenericSender = function(url, payload, callback) {
+    var that = this;
+
+    if (!('_send_form' in that)) {
+        var form = that._send_form = _document.createElement('form');
+        var area = that._send_area = _document.createElement('textarea');
+        area.name = 'd';
+        form.style.display = 'none';
+        form.style.position = 'absolute';
+        form.method = 'POST';
+        form.enctype = 'application/x-www-form-urlencoded';
+        form.acceptCharset = "UTF-8";
+        form.appendChild(area);
+        _document.body.appendChild(form);
+    }
+    var form = that._send_form;
+    var area = that._send_area;
+    var id = 'a' + utils.random_string(8);
+    form.target = id;
+    form.action = url + '/jsonp_send?i=' + id;
+
+    var iframe;
+    try {
+        // ie6 dynamic iframes with target="" support (thanks Chris Lambacher)
+        iframe = _document.createElement('<iframe name="'+ id +'">');
+    } catch(x) {
+        iframe = _document.createElement('iframe');
+        iframe.name = id;
+    }
+    iframe.id = id;
+    form.appendChild(iframe);
+    iframe.style.display = 'none';
+
+    try {
+        area.value = payload;
+    } catch(e) {
+        utils.log('Your browser is seriously broken. Go home! ' + e.message);
+    }
+    form.submit();
+
+    var completed = function(e) {
+        if (!iframe.onerror) return;
+        iframe.onreadystatechange = iframe.onerror = iframe.onload = null;
+        // Opera Mini doesn't like it if we GC the iframe
+        // immediately, hence this delay.
+        utils.delay(500, function() {
+                       iframe.parentNode.removeChild(iframe);
+                       iframe = null;
+                   });
+        area.value = '';
+        // It is not possible to detect if the iframe succeeded or
+        // failed to submit our form.
+        callback(true);
+    };
+    iframe.onerror = iframe.onload = completed;
+    iframe.onreadystatechange = function(e) {
+        if (iframe.readyState == 'complete') completed();
+    };
+    return completed;
+};
+
+var createAjaxSender = function(AjaxObject) {
+    return function(url, payload, callback) {
+        var xo = new AjaxObject('POST', url + '/xhr_send', payload);
+        xo.onfinish = function(status, text) {
+            callback(status === 200 || status === 204,
+                     'http status ' + status);
+        };
+        return function(abort_reason) {
+            callback(false, abort_reason);
+        };
+    };
+};
+//         [*] End of lib/trans-sender.js
+
+
+//         [*] Including lib/trans-jsonp-receiver.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+// Parts derived from Socket.io:
+//    https://github.com/LearnBoost/socket.io/blob/0.6.17/lib/socket.io/transports/jsonp-polling.js
+// and jQuery-JSONP:
+//    https://code.google.com/p/jquery-jsonp/source/browse/trunk/core/jquery.jsonp.js
+var jsonPGenericReceiver = function(url, callback) {
+    var tref;
+    var script = _document.createElement('script');
+    var script2;  // Opera synchronous load trick.
+    var close_script = function(frame) {
+        if (script2) {
+            script2.parentNode.removeChild(script2);
+            script2 = null;
+        }
+        if (script) {
+            clearTimeout(tref);
+            // Unfortunately, you can't really abort loading of
+            // the script.
+            script.parentNode.removeChild(script);
+            script.onreadystatechange = script.onerror =
+                script.onload = script.onclick = null;
+            script = null;
+            callback(frame);
+            callback = null;
+        }
+    };
+
+    // IE9 fires the 'error' event before or after onreadystatechange, in random order.
+    var loaded_okay = false;
+    var error_timer = null;
+
+    script.id = 'a' + utils.random_string(8);
+    script.src = url;
+    script.type = 'text/javascript';
+    script.charset = 'UTF-8';
+    script.onerror = function(e) {
+        if (!error_timer) {
+            // Delay firing close_script.
+            error_timer = setTimeout(function() {
+                if (!loaded_okay) {
+                    close_script(utils.closeFrame(
+                        1006,
+                        "JSONP script loaded abnormally (onerror)"));
+                }
+            }, 1000);
+        }
+    };
+    script.onload = function(e) {
+        close_script(utils.closeFrame(1006, "JSONP script loaded abnormally (onload)"));
+    };
+
+    script.onreadystatechange = function(e) {
+        if (/loaded|closed/.test(script.readyState)) {
+            if (script && script.htmlFor && script.onclick) {
+                loaded_okay = true;
+                try {
+                    // In IE, actually execute the script.
+                    script.onclick();
+                } catch (x) {}
+            }
+            if (script) {
+                close_script(utils.closeFrame(1006, "JSONP script loaded abnormally (onreadystatechange)"));
+            }
+        }
+    };
+    // IE: event/htmlFor/onclick trick.
+    // One can't rely on the proper order of onreadystatechange events. To
+    // be sure, set the 'htmlFor' and 'event' properties, so that the
+    // script code will be installed as the 'onclick' handler for the
+    // script object. Later, in onreadystatechange, manually execute this
+    // code. FF and Chrome don't work with 'event' and 'htmlFor'
+    // set. For reference see:
+    //   http://jaubourg.net/2010/07/loading-script-as-onclick-handler-of.html
+    // Also, on script ordering see:
+    //   http://wiki.whatwg.org/wiki/Dynamic_Script_Execution_Order
+    if (typeof script.async === 'undefined' && _document.attachEvent) {
+        // According to the Mozilla docs, in recent browsers script.async defaults
+        // to 'true', so we may use it to detect a good browser:
+        // https://developer.mozilla.org/en/HTML/Element/script
+        if (!/opera/i.test(navigator.userAgent)) {
+            // Naively assume we're in IE
+            try {
+                script.htmlFor = script.id;
+                script.event = "onclick";
+            } catch (x) {}
+            script.async = true;
+        } else {
+            // Opera, second sync script hack
+            script2 = _document.createElement('script');
+            script2.text = "try{var a = document.getElementById('"+script.id+"'); if(a)a.onerror();}catch(x){};";
+            script.async = script2.async = false;
+        }
+    }
+    if (typeof script.async !== 'undefined') {
+        script.async = true;
+    }
+
+    // Fallback, mostly for Konqueror - a dumb timer; 35 seconds should be plenty.
+    tref = setTimeout(function() {
+                          close_script(utils.closeFrame(1006, "JSONP script loaded abnormally (timeout)"));
+                      }, 35000);
+
+    var head = _document.getElementsByTagName('head')[0];
+    head.insertBefore(script, head.firstChild);
+    if (script2) {
+        head.insertBefore(script2, head.firstChild);
+    }
+    return close_script;
+};
+//         [*] End of lib/trans-jsonp-receiver.js
+
+
+//         [*] Including lib/trans-jsonp-polling.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+// The simplest and most robust transport, using the well-known cross
+// domain hack - JSONP. This transport is quite inefficient - each
+// message may require its own http request. But at least it works almost
+// everywhere.
+// Known limitations:
+//   o you will get a spinning cursor
+//   o for Konqueror a dumb timer is needed to detect errors
+
+
+var JsonPTransport = SockJS['jsonp-polling'] = function(ri, trans_url) {
+    utils.polluteGlobalNamespace();
+    var that = this;
+    that.ri = ri;
+    that.trans_url = trans_url;
+    that.send_constructor(jsonPGenericSender);
+    that._schedule_recv();
+};
+
+// Inheritance
+JsonPTransport.prototype = new BufferedSender();
+
+JsonPTransport.prototype._schedule_recv = function() {
+    var that = this;
+    var callback = function(data) {
+        that._recv_stop = null;
+        // No data means it was just a heartbeat.
+        if (data) {
+            if (!that._is_closing) {
+                that.ri._didMessage(data);
+            }
+        }
+        // The message can be a close message, and change is_closing state.
+        if (!that._is_closing) {
+            that._schedule_recv();
+        }
+    };
+    that._recv_stop = jsonPReceiverWrapper(that.trans_url + '/jsonp',
+                                           jsonPGenericReceiver, callback);
+};
+
+JsonPTransport.enabled = function() {
+    return true;
+};
+
+JsonPTransport.need_body = true;
+
+
+JsonPTransport.prototype.doCleanup = function() {
+    var that = this;
+    that._is_closing = true;
+    if (that._recv_stop) {
+        that._recv_stop();
+    }
+    that.ri = that._recv_stop = null;
+    that.send_destructor();
+};
+
+
+// Abstract away code that handles global namespace pollution.
+var jsonPReceiverWrapper = function(url, constructReceiver, user_callback) {
+    var id = 'a' + utils.random_string(6);
+    var url_id = url + '?c=' + escape(WPrefix + '.' + id);
+
+    // Unfortunately it is not possible to abort loading of the
+    // script. We need to keep track of fake close frames.
+    var aborting = 0;
+
+    // Callback will be called exactly once.
+    var callback = function(frame) {
+        switch(aborting) {
+        case 0:
+            // Normal behaviour - delete hook _and_ emit message.
+            delete _window[WPrefix][id];
+            user_callback(frame);
+            break;
+        case 1:
+            // Fake close frame - emit but don't delete hook.
+            user_callback(frame);
+            aborting = 2;
+            break;
+        case 2:
+            // Got frame after connection was closed, delete hook, don't emit.
+            delete _window[WPrefix][id];
+            break;
+        }
+    };
+
+    var close_script = constructReceiver(url_id, callback);
+    _window[WPrefix][id] = close_script;
+    var stop = function() {
+        if (_window[WPrefix][id]) {
+            aborting = 1;
+            _window[WPrefix][id](utils.closeFrame(1000, "JSONP user aborted read"));
+        }
+    };
+    return stop;
+};
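+// Example of the polling URL built above (assuming WPrefix is '_jp', as
+// defined earlier in this bundle, and a random id of 'a123456'):
+//
+//     '/stomp/444/abcd1234/jsonp?c=_jp.a123456'
+//
+// The server's JSONP response then calls window._jp.a123456(frame).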
+//         [*] End of lib/trans-jsonp-polling.js
+
+
+//         [*] Including lib/trans-xhr.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var AjaxBasedTransport = function() {};
+AjaxBasedTransport.prototype = new BufferedSender();
+
+AjaxBasedTransport.prototype.run = function(ri, trans_url,
+                                            url_suffix, Receiver, AjaxObject) {
+    var that = this;
+    that.ri = ri;
+    that.trans_url = trans_url;
+    that.send_constructor(createAjaxSender(AjaxObject));
+    that.poll = new Polling(ri, Receiver,
+                            trans_url + url_suffix, AjaxObject);
+};
+
+AjaxBasedTransport.prototype.doCleanup = function() {
+    var that = this;
+    if (that.poll) {
+        that.poll.abort();
+        that.poll = null;
+    }
+};
+
+// xhr-streaming
+var XhrStreamingTransport = SockJS['xhr-streaming'] = function(ri, trans_url) {
+    this.run(ri, trans_url, '/xhr_streaming', XhrReceiver, utils.XHRCorsObject);
+};
+
+XhrStreamingTransport.prototype = new AjaxBasedTransport();
+
+XhrStreamingTransport.enabled = function() {
+    // Support for CORS Ajax aka Ajax2? Opera 12 claims CORS but
+    // doesn't do streaming.
+    return (_window.XMLHttpRequest &&
+            'withCredentials' in new XMLHttpRequest() &&
+            (!/opera/i.test(navigator.userAgent)));
+};
+XhrStreamingTransport.roundTrips = 2; // preflight, ajax
+
+// Safari gets confused when a streaming ajax request is started
+// before onload. This causes the load indicator to spin indefinitely.
+XhrStreamingTransport.need_body = true;
+
+
+// According to:
+//   http://stackoverflow.com/questions/1641507/detect-browser-support-for-cross-domain-xmlhttprequests
+//   http://hacks.mozilla.org/2009/07/cross-site-xmlhttprequest-with-cors/
+
+
+// xdr-streaming
+var XdrStreamingTransport = SockJS['xdr-streaming'] = function(ri, trans_url) {
+    this.run(ri, trans_url, '/xhr_streaming', XhrReceiver, utils.XDRObject);
+};
+
+XdrStreamingTransport.prototype = new AjaxBasedTransport();
+
+XdrStreamingTransport.enabled = function() {
+    return !!_window.XDomainRequest;
+};
+XdrStreamingTransport.roundTrips = 2; // preflight, ajax
+
+
+
+// xhr-polling
+var XhrPollingTransport = SockJS['xhr-polling'] = function(ri, trans_url) {
+    this.run(ri, trans_url, '/xhr', XhrReceiver, utils.XHRCorsObject);
+};
+
+XhrPollingTransport.prototype = new AjaxBasedTransport();
+
+XhrPollingTransport.enabled = XhrStreamingTransport.enabled;
+XhrPollingTransport.roundTrips = 2; // preflight, ajax
+
+
+// xdr-polling
+var XdrPollingTransport = SockJS['xdr-polling'] = function(ri, trans_url) {
+    this.run(ri, trans_url, '/xhr', XhrReceiver, utils.XDRObject);
+};
+
+XdrPollingTransport.prototype = new AjaxBasedTransport();
+
+XdrPollingTransport.enabled = XdrStreamingTransport.enabled;
+XdrPollingTransport.roundTrips = 2; // preflight, ajax
+//         [*] End of lib/trans-xhr.js
+
+
+//         [*] Including lib/trans-iframe.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+// A few useful transports work only for same-origin requests. To make
+// them work cross-domain we use an iframe served from the
+// remote domain. Newer browsers can communicate with a
+// cross-domain iframe using postMessage(). In IE this is available
+// from IE 8 on, but of course IE got some details wrong:
+//    http://msdn.microsoft.com/en-us/library/cc197015(v=VS.85).aspx
+//    http://stevesouders.com/misc/test-postmessage.php
+
+var IframeTransport = function() {};
+
+IframeTransport.prototype.i_constructor = function(ri, trans_url, base_url) {
+    var that = this;
+    that.ri = ri;
+    that.origin = utils.getOrigin(base_url);
+    that.base_url = base_url;
+    that.trans_url = trans_url;
+
+    var iframe_url = base_url + '/iframe.html';
+    if (that.ri._options.devel) {
+        iframe_url += '?t=' + (+new Date);
+    }
+    that.window_id = utils.random_string(8);
+    iframe_url += '#' + that.window_id;
+
+    that.iframeObj = utils.createIframe(iframe_url, function(r) {
+                                            that.ri._didClose(1006, "Unable to load an iframe (" + r + ")");
+                                        });
+
+    that.onmessage_cb = utils.bind(that.onmessage, that);
+    utils.attachMessage(that.onmessage_cb);
+};
+
+IframeTransport.prototype.doCleanup = function() {
+    var that = this;
+    if (that.iframeObj) {
+        utils.detachMessage(that.onmessage_cb);
+        try {
+            // When the iframe is not loaded, IE raises an exception
+            // on 'contentWindow'.
+            if (that.iframeObj.iframe.contentWindow) {
+                that.postMessage('c');
+            }
+        } catch (x) {}
+        that.iframeObj.cleanup();
+        that.iframeObj = null;
+        that.onmessage_cb = that.iframeObj = null;
+    }
+};
+
+IframeTransport.prototype.onmessage = function(e) {
+    var that = this;
+    if (e.origin !== that.origin) return;
+    var window_id = e.data.slice(0, 8);
+    var type = e.data.slice(8, 9);
+    var data = e.data.slice(9);
+
+    if (window_id !== that.window_id) return;
+
+    switch(type) {
+    case 's':
+        that.iframeObj.loaded();
+        that.postMessage('s', JSON.stringify([SockJS.version, that.protocol, that.trans_url, that.base_url]));
+        break;
+    case 't':
+        that.ri._didMessage(data);
+        break;
+    }
+};
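+// The strings exchanged with the iframe are: an 8-character window id,
+// a one-character type and the payload. For example, a transport frame
+// relayed from the iframe might look like 'abcd1234ta["hello"]'
+// (window_id 'abcd1234', type 't', data 'a["hello"]').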
+
+IframeTransport.prototype.postMessage = function(type, data) {
+    var that = this;
+    that.iframeObj.post(that.window_id + type + (data || ''), that.origin);
+};
+
+IframeTransport.prototype.doSend = function (message) {
+    this.postMessage('m', message);
+};
+
+IframeTransport.enabled = function() {
+    // postMessage misbehaves in Konqueror 4.6.5 - messages are delivered with
+    // a huge delay, or not at all.
+    var konqueror = navigator && navigator.userAgent && navigator.userAgent.indexOf('Konqueror') !== -1;
+    return ((typeof _window.postMessage === 'function' ||
+            typeof _window.postMessage === 'object') && (!konqueror));
+};
+//         [*] End of lib/trans-iframe.js
+
+
+//         [*] Including lib/trans-iframe-within.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var curr_window_id;
+
+var postMessage = function (type, data) {
+    if(parent !== _window) {
+        parent.postMessage(curr_window_id + type + (data || ''), '*');
+    } else {
+        utils.log("Can't postMessage, no parent window.", type, data);
+    }
+};
+
+var FacadeJS = function() {};
+FacadeJS.prototype._didClose = function (code, reason) {
+    postMessage('t', utils.closeFrame(code, reason));
+};
+FacadeJS.prototype._didMessage = function (frame) {
+    postMessage('t', frame);
+};
+FacadeJS.prototype._doSend = function (data) {
+    this._transport.doSend(data);
+};
+FacadeJS.prototype._doCleanup = function () {
+    this._transport.doCleanup();
+};
+
+utils.parent_origin = undefined;
+
+SockJS.bootstrap_iframe = function() {
+    var facade;
+    curr_window_id = _document.location.hash.slice(1);
+    var onMessage = function(e) {
+        if(e.source !== parent) return;
+        if(typeof utils.parent_origin === 'undefined')
+            utils.parent_origin = e.origin;
+        if (e.origin !== utils.parent_origin) return;
+
+        var window_id = e.data.slice(0, 8);
+        var type = e.data.slice(8, 9);
+        var data = e.data.slice(9);
+        if (window_id !== curr_window_id) return;
+        switch(type) {
+        case 's':
+            var p = JSON.parse(data);
+            var version = p[0];
+            var protocol = p[1];
+            var trans_url = p[2];
+            var base_url = p[3];
+            if (version !== SockJS.version) {
+                utils.log("Incompatibile SockJS! Main site uses:" +
+                          " \"" + version + "\", the iframe:" +
+                          " \"" + SockJS.version + "\".");
+            }
+            if (!utils.flatUrl(trans_url) || !utils.flatUrl(base_url)) {
+                utils.log("Only basic urls are supported in SockJS");
+                return;
+            }
+
+            if (!utils.isSameOriginUrl(trans_url) ||
+                !utils.isSameOriginUrl(base_url)) {
+                utils.log("Can't connect to different domain from within an " +
+                          "iframe. (" + JSON.stringify([_window.location.href, trans_url, base_url]) +
+                          ")");
+                return;
+            }
+            facade = new FacadeJS();
+            facade._transport = new FacadeJS[protocol](facade, trans_url, base_url);
+            break;
+        case 'm':
+            facade._doSend(data);
+            break;
+        case 'c':
+            if (facade)
+                facade._doCleanup();
+            facade = null;
+            break;
+        }
+    };
+
+    // alert('test ticker');
+    // facade = new FacadeJS();
+    // facade._transport = new FacadeJS['w-iframe-xhr-polling'](facade, 'http://host.com:9999/ticker/12/basd');
+
+    utils.attachMessage(onMessage);
+
+    // Start
+    postMessage('s');
+};
+//         [*] End of lib/trans-iframe-within.js
+
+
+//         [*] Including lib/info.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var InfoReceiver = function(base_url, AjaxObject) {
+    var that = this;
+    utils.delay(function(){that.doXhr(base_url, AjaxObject);});
+};
+
+InfoReceiver.prototype = new EventEmitter(['finish']);
+
+InfoReceiver.prototype.doXhr = function(base_url, AjaxObject) {
+    var that = this;
+    var t0 = (new Date()).getTime();
+    var xo = new AjaxObject('GET', base_url + '/info');
+
+    var tref = utils.delay(8000,
+                           function(){xo.ontimeout();});
+
+    xo.onfinish = function(status, text) {
+        clearTimeout(tref);
+        tref = null;
+        if (status === 200) {
+            var rtt = (new Date()).getTime() - t0;
+            var info = JSON.parse(text);
+            if (typeof info !== 'object') info = {};
+            that.emit('finish', info, rtt);
+        } else {
+            that.emit('finish');
+        }
+    };
+    xo.ontimeout = function() {
+        xo.close();
+        that.emit('finish');
+    };
+};
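+// Example: if the GET to base_url + '/info' answers 200 after roughly
+// 250ms with a JSON body such as {"websocket":true,"cookie_needed":false},
+// 'finish' is emitted with that object and rtt ~= 250; on an 8s timeout
+// or a non-200 status, 'finish' is emitted with no arguments.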
+
+var InfoReceiverIframe = function(base_url) {
+    var that = this;
+    var go = function() {
+        var ifr = new IframeTransport();
+        ifr.protocol = 'w-iframe-info-receiver';
+        var fun = function(r) {
+            if (typeof r === 'string' && r.substr(0,1) === 'm') {
+                var d = JSON.parse(r.substr(1));
+                var info = d[0], rtt = d[1];
+                that.emit('finish', info, rtt);
+            } else {
+                that.emit('finish');
+            }
+            ifr.doCleanup();
+            ifr = null;
+        };
+        var mock_ri = {
+            _options: {},
+            _didClose: fun,
+            _didMessage: fun
+        };
+        ifr.i_constructor(mock_ri, base_url, base_url);
+    }
+    if(!_document.body) {
+        utils.attachEvent('load', go);
+    } else {
+        go();
+    }
+};
+InfoReceiverIframe.prototype = new EventEmitter(['finish']);
+
+
+var InfoReceiverFake = function() {
+    // It may not be possible to do cross domain AJAX to get the info
+    // data, for example for IE7. But we want to run JSONP, so let's
+    // fake the response, with rtt=2s (rto=6s).
+    var that = this;
+    utils.delay(function() {
+        that.emit('finish', {}, 2000);
+    });
+};
+InfoReceiverFake.prototype = new EventEmitter(['finish']);
+
+var createInfoReceiver = function(base_url) {
+    if (utils.isSameOriginUrl(base_url)) {
+        // If, for some reason, we have SockJS locally - there's no
+        // need to start up the complex machinery. Just use ajax.
+        return new InfoReceiver(base_url, utils.XHRLocalObject);
+    }
+    switch (utils.isXHRCorsCapable()) {
+    case 1:
+        // XHRLocalObject -> no_credentials=true
+        return new InfoReceiver(base_url, utils.XHRLocalObject);
+    case 2:
+        return new InfoReceiver(base_url, utils.XDRObject);
+    case 3:
+        // Opera
+        return new InfoReceiverIframe(base_url);
+    default:
+        // IE 7
+        return new InfoReceiverFake();
+    };
+};
+
+
+var WInfoReceiverIframe = FacadeJS['w-iframe-info-receiver'] = function(ri, _trans_url, base_url) {
+    var ir = new InfoReceiver(base_url, utils.XHRLocalObject);
+    ir.onfinish = function(info, rtt) {
+        ri._didMessage('m'+JSON.stringify([info, rtt]));
+        ri._didClose();
+    }
+};
+WInfoReceiverIframe.prototype.doCleanup = function() {};
+//         [*] End of lib/info.js
+
+
+//         [*] Including lib/trans-iframe-eventsource.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var EventSourceIframeTransport = SockJS['iframe-eventsource'] = function () {
+    var that = this;
+    that.protocol = 'w-iframe-eventsource';
+    that.i_constructor.apply(that, arguments);
+};
+
+EventSourceIframeTransport.prototype = new IframeTransport();
+
+EventSourceIframeTransport.enabled = function () {
+    return ('EventSource' in _window) && IframeTransport.enabled();
+};
+
+EventSourceIframeTransport.need_body = true;
+EventSourceIframeTransport.roundTrips = 3; // html, javascript, eventsource
+
+
+// w-iframe-eventsource
+var EventSourceTransport = FacadeJS['w-iframe-eventsource'] = function(ri, trans_url) {
+    this.run(ri, trans_url, '/eventsource', EventSourceReceiver, utils.XHRLocalObject);
+}
+EventSourceTransport.prototype = new AjaxBasedTransport();
+//         [*] End of lib/trans-iframe-eventsource.js
+
+
+//         [*] Including lib/trans-iframe-xhr-polling.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var XhrPollingIframeTransport = SockJS['iframe-xhr-polling'] = function () {
+    var that = this;
+    that.protocol = 'w-iframe-xhr-polling';
+    that.i_constructor.apply(that, arguments);
+};
+
+XhrPollingIframeTransport.prototype = new IframeTransport();
+
+XhrPollingIframeTransport.enabled = function () {
+    return _window.XMLHttpRequest && IframeTransport.enabled();
+};
+
+XhrPollingIframeTransport.need_body = true;
+XhrPollingIframeTransport.roundTrips = 3; // html, javascript, xhr
+
+
+// w-iframe-xhr-polling
+var XhrPollingITransport = FacadeJS['w-iframe-xhr-polling'] = function(ri, trans_url) {
+    this.run(ri, trans_url, '/xhr', XhrReceiver, utils.XHRLocalObject);
+};
+
+XhrPollingITransport.prototype = new AjaxBasedTransport();
+//         [*] End of lib/trans-iframe-xhr-polling.js
+
+
+//         [*] Including lib/trans-iframe-htmlfile.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+// This transport generally works in any browser, but will cause a
+// spinning cursor to appear in any browser other than IE.
+// We may test this transport in all browsers - why not - but in
+// production it should only be run in IE.
+
+var HtmlFileIframeTransport = SockJS['iframe-htmlfile'] = function () {
+    var that = this;
+    that.protocol = 'w-iframe-htmlfile';
+    that.i_constructor.apply(that, arguments);
+};
+
+// Inheritance.
+HtmlFileIframeTransport.prototype = new IframeTransport();
+
+HtmlFileIframeTransport.enabled = function() {
+    return IframeTransport.enabled();
+};
+
+HtmlFileIframeTransport.need_body = true;
+HtmlFileIframeTransport.roundTrips = 3; // html, javascript, htmlfile
+
+
+// w-iframe-htmlfile
+var HtmlFileTransport = FacadeJS['w-iframe-htmlfile'] = function(ri, trans_url) {
+    this.run(ri, trans_url, '/htmlfile', HtmlfileReceiver, utils.XHRLocalObject);
+};
+HtmlFileTransport.prototype = new AjaxBasedTransport();
+//         [*] End of lib/trans-iframe-htmlfile.js
+
+
+//         [*] Including lib/trans-polling.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var Polling = function(ri, Receiver, recv_url, AjaxObject) {
+    var that = this;
+    that.ri = ri;
+    that.Receiver = Receiver;
+    that.recv_url = recv_url;
+    that.AjaxObject = AjaxObject;
+    that._scheduleRecv();
+};
+
+Polling.prototype._scheduleRecv = function() {
+    var that = this;
+    var poll = that.poll = new that.Receiver(that.recv_url, that.AjaxObject);
+    var msg_counter = 0;
+    poll.onmessage = function(e) {
+        msg_counter += 1;
+        that.ri._didMessage(e.data);
+    };
+    poll.onclose = function(e) {
+        that.poll = poll = poll.onmessage = poll.onclose = null;
+        if (!that.poll_is_closing) {
+            if (e.reason === 'permanent') {
+                that.ri._didClose(1006, 'Polling error (' + e.reason + ')');
+            } else {
+                that._scheduleRecv();
+            }
+        }
+    };
+};
+
+Polling.prototype.abort = function() {
+    var that = this;
+    that.poll_is_closing = true;
+    if (that.poll) {
+        that.poll.abort();
+    }
+};
+//         [*] End of lib/trans-polling.js
+
+
+//         [*] Including lib/trans-receiver-eventsource.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var EventSourceReceiver = function(url) {
+    var that = this;
+    var es = new EventSource(url);
+    es.onmessage = function(e) {
+        that.dispatchEvent(new SimpleEvent('message',
+                                           {'data': unescape(e.data)}));
+    };
+    that.es_close = es.onerror = function(e, abort_reason) {
+        // On reconnection the EventSource has readyState = 0 or 1;
+        // on a network error it's CLOSED = 2.
+        var reason = abort_reason ? 'user' :
+            (es.readyState !== 2 ? 'network' : 'permanent');
+        that.es_close = es.onmessage = es.onerror = null;
+        // EventSource reconnects automatically.
+        es.close();
+        es = null;
+        // Safari and Chrome < 15 crash if we close the window before
+        // waiting for ES cleanup. See:
+        //   https://code.google.com/p/chromium/issues/detail?id=89155
+        utils.delay(200, function() {
+                        that.dispatchEvent(new SimpleEvent('close', {reason: reason}));
+                    });
+    };
+};
+
+EventSourceReceiver.prototype = new REventTarget();
+
+EventSourceReceiver.prototype.abort = function() {
+    var that = this;
+    if (that.es_close) {
+        that.es_close({}, true);
+    }
+};
+//         [*] End of lib/trans-receiver-eventsource.js
+
+
+//         [*] Including lib/trans-receiver-htmlfile.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var _is_ie_htmlfile_capable;
+var isIeHtmlfileCapable = function() {
+    if (_is_ie_htmlfile_capable === undefined) {
+        if ('ActiveXObject' in _window) {
+            try {
+                _is_ie_htmlfile_capable = !!new ActiveXObject('htmlfile');
+            } catch (x) {}
+        } else {
+            _is_ie_htmlfile_capable = false;
+        }
+    }
+    return _is_ie_htmlfile_capable;
+};
+
+
+var HtmlfileReceiver = function(url) {
+    var that = this;
+    utils.polluteGlobalNamespace();
+
+    that.id = 'a' + utils.random_string(6, 26);
+    url += ((url.indexOf('?') === -1) ? '?' : '&') +
+        'c=' + escape(WPrefix + '.' + that.id);
+
+    var constructor = isIeHtmlfileCapable() ?
+        utils.createHtmlfile : utils.createIframe;
+
+    var iframeObj;
+    _window[WPrefix][that.id] = {
+        start: function () {
+            iframeObj.loaded();
+        },
+        message: function (data) {
+            that.dispatchEvent(new SimpleEvent('message', {'data': data}));
+        },
+        stop: function () {
+            that.iframe_close({}, 'network');
+        }
+    };
+    that.iframe_close = function(e, abort_reason) {
+        iframeObj.cleanup();
+        that.iframe_close = iframeObj = null;
+        delete _window[WPrefix][that.id];
+        that.dispatchEvent(new SimpleEvent('close', {reason: abort_reason}));
+    };
+    iframeObj = constructor(url, function(e) {
+                                that.iframe_close({}, 'permanent');
+                            });
+};
+
+HtmlfileReceiver.prototype = new REventTarget();
+
+HtmlfileReceiver.prototype.abort = function() {
+    var that = this;
+    if (that.iframe_close) {
+        that.iframe_close({}, 'user');
+    }
+};
+//         [*] End of lib/trans-receiver-htmlfile.js
+
+
+//         [*] Including lib/trans-receiver-xhr.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+var XhrReceiver = function(url, AjaxObject) {
+    var that = this;
+    var buf_pos = 0;
+
+    that.xo = new AjaxObject('POST', url, null);
+    that.xo.onchunk = function(status, text) {
+        if (status !== 200) return;
+        while (1) {
+            var buf = text.slice(buf_pos);
+            var p = buf.indexOf('\n');
+            if (p === -1) break;
+            buf_pos += p+1;
+            var msg = buf.slice(0, p);
+            that.dispatchEvent(new SimpleEvent('message', {data: msg}));
+        }
+    };
+    that.xo.onfinish = function(status, text) {
+        that.xo.onchunk(status, text);
+        that.xo = null;
+        var reason = status === 200 ? 'network' : 'permanent';
+        that.dispatchEvent(new SimpleEvent('close', {reason: reason}));
+    }
+};
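+// Example of the chunk parsing above: if the streamed response text so far
+// is 'o\na["hello"]\n', the loop emits two message events, first with data
+// 'o' and then with data 'a["hello"]', advancing buf_pos past each newline.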
+
+XhrReceiver.prototype = new REventTarget();
+
+XhrReceiver.prototype.abort = function() {
+    var that = this;
+    if (that.xo) {
+        that.xo.close();
+        that.dispatchEvent(new SimpleEvent('close', {reason: 'user'}));
+        that.xo = null;
+    }
+};
+//         [*] End of lib/trans-receiver-xhr.js
+
+
+//         [*] Including lib/test-hooks.js
+/*
+ * ***** BEGIN LICENSE BLOCK *****
+ * Copyright (c) 2011-2012 VMware, Inc.
+ *
+ * For the license see COPYING.
+ * ***** END LICENSE BLOCK *****
+ */
+
+// For testing
+SockJS.getUtils = function(){
+    return utils;
+};
+
+SockJS.getIframeTransport = function(){
+    return IframeTransport;
+};
+//         [*] End of lib/test-hooks.js
+
+                  return SockJS;
+          })();
+if ('_sockjs_onload' in window) setTimeout(_sockjs_onload, 1);
+
+// AMD compliance
+if (typeof define === 'function' && define.amd) {
+    define('sockjs', [], function(){return SockJS;});
+}
+//     [*] End of lib/index.js
+
+// [*] End of lib/all.js
+
index 0bdb15821ec5678f227af1b5f0e4698fdffd17ca..026f1ecaab73d1181823c916790dcc205cef7a1a 100644 (file)
@@ -1,7 +1,7 @@
 <!DOCTYPE html>
 <html><head>
   <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.2/jquery.min.js"></script>
-  <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+  <script src="sockjs-0.3.js"></script>
   <script src="stomp.js"></script>
   <style>
       .box {
diff --git a/rabbitmq-server/plugins-src/rabbitmq-web-stomp/CONTRIBUTING.md b/rabbitmq-server/plugins-src/rabbitmq-web-stomp/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index e7fa61ac837089d43576b046d736dd18199cc684..48376d8f13c025d46e1dfb62ec48058d3aac41d4 100644 (file)
@@ -11,10 +11,10 @@ port 15674, for example a valid SockJS endpoint url may look like:
 Once the server is started you should be able to establish a SockJS
 connection to this url. You will be able to communicate using the
 usual STOMP protocol over it. For example, a page using Jeff Mesnil's
-"stomp-websocket" project may look like this:
+"stomp-websocket" project and SockJS may look like this:
 
 
-    <script src="http://cdn.sockjs.org/sockjs-0.3.min.js"></script>
+    <script src="sockjs-0.3.min.js"></script>
     <script src="stomp.js"></script>
     <script>
         Stomp.WebSocketClass = SockJS;
index 8f2699b07a3fc5bdf502443bfaad61c4df07d2bc..3cc7ebd72bb0633278b75b8886f8c307da2f6717 100644 (file)
@@ -32,7 +32,8 @@ init() ->
                     <<"/stomp">>, fun service_stomp/3, {}, SockjsOpts),
     VhostRoutes = [{[<<"stomp">>, '...'], sockjs_cowboy_handler, SockjsState}],
     Routes = [{'_',  VhostRoutes}], % any vhost
-    cowboy:start_listener(http, 100,
+    NbAcceptors = get_env(nb_acceptors, 100),
+    cowboy:start_listener(http, NbAcceptors,
                           cowboy_tcp_transport, [{port,     Port}],
                           cowboy_http_protocol, [{dispatch, Routes}]),
     rabbit_log:info("rabbit_web_stomp: listening for HTTP connections on ~s:~w~n",
@@ -43,7 +44,7 @@ init() ->
         Conf ->
             rabbit_networking:ensure_ssl(),
             TLSPort = proplists:get_value(port, Conf),
-            cowboy:start_listener(https, 100,
+            cowboy:start_listener(https, NbAcceptors,
                                   cowboy_ssl_transport, Conf,
                                   cowboy_http_protocol, [{dispatch, Routes}]),
             rabbit_log:info("rabbit_web_stomp: listening for HTTPS connections on ~s:~w~n",
index a23e2fd3cafcda2d9cb1312fc4f4d16e08e228d1..710a7f6acffd477698e8686086d6ecc829df80d0 100644 (file)
@@ -30,7 +30,7 @@ unmarshal(Frame) ->
     [Head, Body] = binary:split(Frame, <<"\n\n">>),
     [Command | HeaderLines] = binary:split(Head, <<"\n">>, [global]),
     Headers = [list_to_tuple(binary:split(Line, <<":">>)) || Line <- HeaderLines],
-    [Body1, <<>>] = binary:split(Body, [<<0>>],[{scope,{byte_size(Body)-1, 1}}]),
+    [Body1, <<>>] = binary:split(Body, [<<0, 10>>],[{scope,{byte_size(Body)-2, 2}}]),
     {Command, Headers, Body1}.
 
 %% ----------
index c9defee909c9fd9221191a82e8fa489342c38b6d..df7276106c1a85dbfe2952ead8037e2cfac2f77a 100644 (file)
@@ -10,8 +10,8 @@ SIGNING_KEY=056E8E56
 SIGNING_USER_EMAIL=info@rabbitmq.com
 SIGNING_USER_ID=RabbitMQ Release Signing Key <info@rabbitmq.com>
 
-# Misc options to pass to hg commands
-HG_OPTS=
+# Misc options to pass to git commands
+GIT_OPTS=
 
 # Misc options to pass to ssh commands
 SSH_OPTS=
@@ -35,10 +35,10 @@ SKIP_EMULATOR_VERSION_CHECK=
 
 REPOS:=rabbitmq-codegen rabbitmq-server rabbitmq-java-client rabbitmq-dotnet-client rabbitmq-test
 
-HGREPOBASE:=$(shell dirname `hg paths default 2>/dev/null` 2>/dev/null)
+GITREPOBASE:=$(shell dirname `git remote -v 2>/dev/null | awk '/^origin\t.+ \(fetch\)$$/ { print $$2; }'` 2>/dev/null)
 
-ifeq ($(HGREPOBASE),)
-HGREPOBASE=ssh://hg@hg.rabbitmq.com
+ifeq ($(GITREPOBASE),)
+GITREPOBASE=https://github.com/rabbitmq
 endif
 
 .PHONY: all
@@ -130,6 +130,7 @@ rabbitmq-server-windows-packaging: rabbitmq-server-srcdist
 
 .PHONY: rabbitmq-server-windows-exe-packaging
 rabbitmq-server-windows-exe-packaging: rabbitmq-server-windows-packaging
+       $(MAKE) -C rabbitmq-server/packaging/windows-exe clean
        $(MAKE) -C rabbitmq-server/packaging/windows-exe dist VERSION=$(VERSION)
        cp rabbitmq-server/packaging/windows-exe/rabbitmq-server-*.exe $(SERVER_PACKAGES_DIR)
 
diff --git a/rabbitmq-server/plugins-src/sockjs-erlang-wrapper/CONTRIBUTING.md b/rabbitmq-server/plugins-src/sockjs-erlang-wrapper/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/rabbitmq-server/plugins-src/webmachine-wrapper/CONTRIBUTING.md b/rabbitmq-server/plugins-src/webmachine-wrapper/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..69a4b4a
--- /dev/null
@@ -0,0 +1,51 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## (Brief) Code of Conduct
+
+In one line: don't be a dick.
+
+Be respectful to the maintainers and other contributors. Open source
+contributors put long hours into developing projects and doing user
+support. Those projects and user support are available for free. We
+believe this deserves some respect.
+
+Be respectful to people of all races, genders, religious beliefs and
+political views. Regardless of how brilliant a pull request is
+technically, we will not tolerate disrespectful or aggressive
+behaviour.
+
+Contributors who violate this straightforward Code of Conduct will see
+their pull requests closed and locked.
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 1d9afbd8a530440a2270064670518dfdaa3d1ca0..26f6af7cff69cee780f332b6f4cade87d4836ec7 100755 (executable)
@@ -12,7 +12,7 @@
 ##  The Original Code is RabbitMQ.
 ##
 ##  The Initial Developer of the Original Code is GoPivotal, Inc.
-##  Copyright (c) 2012-2014 GoPivotal, Inc.  All rights reserved.
+##  Copyright (c) 2012-2015 Pivotal Software, Inc.  All rights reserved.
 ##
 
 ### next line potentially updated in package install steps
@@ -26,11 +26,14 @@ SASL_BOOT_FILE=start_sasl
 
 ## Set default values
 
+BOOT_MODULE="rabbit"
+
 CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq
 LOG_BASE=${SYS_PREFIX}/var/log/rabbitmq
 MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia
 ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
 
 PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
+IO_THREAD_POOL_SIZE=64
 
 CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
diff --git a/rabbitmq-server/scripts/rabbitmq-defaults.bat b/rabbitmq-server/scripts/rabbitmq-defaults.bat
new file mode 100755 (executable)
index 0000000..d3983f2
--- /dev/null
@@ -0,0 +1,37 @@
+@echo off
+
+REM ### next line potentially updated in package install steps
+REM set SYS_PREFIX=
+
+REM ### next line will be updated when generating a standalone release
+REM ERL_DIR=
+set ERL_DIR=
+
+REM These boot files don't appear to be referenced in the batch scripts
+REM set CLEAN_BOOT_FILE=start_clean
+REM set SASL_BOOT_FILE=start_sasl
+
+REM ## Set default values
+
+if "!RABBITMQ_BASE!"=="" (
+    set RABBITMQ_BASE=!APPDATA!\RabbitMQ
+)
+
+REM BOOT_MODULE="rabbit"
+REM CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq
+REM LOG_BASE=${SYS_PREFIX}/var/log/rabbitmq
+REM MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia
+REM ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
+set BOOT_MODULE=rabbit
+set CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
+set LOG_BASE=!RABBITMQ_BASE!\log
+set MNESIA_BASE=!RABBITMQ_BASE!\db
+set ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
+
+REM PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
+set PLUGINS_DIR=!TDP0!..\plugins
+
+REM CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
+if "!RABBITMQ_CONF_ENV_FILE!"=="" (
+    set CONF_ENV_FILE=!APPDATA!\RabbitMQ\rabbitmq-env-conf.bat
+)
index 5c652c30c022cdfe8a44ec6989aa1f9c86b6216f..6262a1638f80eda53121f9690f73d7a9b1fa3837 100755 (executable)
@@ -2,10 +2,16 @@
 
 REM Usage: rabbitmq-echopid.bat <rabbitmq_nodename>
 REM
-REM <rabbitmq_nodename> sname of the erlang node to connect to (required)
+REM <rabbitmq_nodename> (s)name of the erlang node to connect to (required)
 
 setlocal
 
+set TDP0=%~dp0
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "!TDP0!\rabbitmq-env.bat"
+
 if "%1"=="" goto fail
 
 :: set timeout vars ::
@@ -19,7 +25,7 @@ if not exist "%WMIC_PATH%" (
 )
 
 :getpid
-for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%-sname %1%%'" get processid 2^>nul`) do (
+for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%%RABBITMQ_NAME_TYPE% %1%%'" get processid 2^>nul`) do (
   set PID=%%P
   goto echopid
 )
index 69d5a9c9d0af8984c5d2bb66ca0a9aa8f6657270..a5bf52ab6a5b95904e1b8aa5bde027bbeeaf813e 100755 (executable)
@@ -12,7 +12,7 @@
 ##  The Original Code is RabbitMQ.
 ##
 ##  The Initial Developer of the Original Code is GoPivotal, Inc.
-##  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+##  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 ##
 
 # We set +e here since our test for "readlink -f" below needs to
@@ -39,15 +39,12 @@ set -e
 
 SCRIPT_DIR=`dirname $SCRIPT_PATH`
 RABBITMQ_HOME="${SCRIPT_DIR}/.."
-[ "x" = "x$HOSTNAME" ] && HOSTNAME=`env hostname`
-NODENAME=rabbit@${HOSTNAME%%.*}
 
 ## Set defaults
 . ${SCRIPT_DIR}/rabbitmq-defaults
 
 ## Common defaults
-SERVER_ERL_ARGS="+K true +A30 +P 1048576 \
-  -kernel inet_default_connect_options [{nodelay,true}]"
+SERVER_ERL_ARGS="+P 1048576"
 
 # warn about old rabbitmq.conf file, if no new one
 if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
@@ -56,5 +53,138 @@ if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
     echo "location has moved to ${CONF_ENV_FILE}"
 fi
 
+# We save the current value of $RABBITMQ_PID_FILE in case it was set by
+# an init script. If $CONF_ENV_FILE overrides it again, we must ignore
+# it and warn the user.
+saved_RABBITMQ_PID_FILE=$RABBITMQ_PID_FILE
+
 ## Get configuration variables from the configure environment file
 [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true
+
+if [ "$saved_RABBITMQ_PID_FILE" -a \
+     "$saved_RABBITMQ_PID_FILE" != "$RABBITMQ_PID_FILE" ]; then
+    echo "WARNING: RABBITMQ_PID_FILE was already set by the init script to:" 1>&2
+    echo "           $saved_RABBITMQ_PID_FILE" 1>&2
+    echo "         The value set in rabbitmq-env.conf is ignored because it" 1>&2
+    echo "         would break the init script." 1>&2
+
+    RABBITMQ_PID_FILE="$saved_RABBITMQ_PID_FILE"
+fi
+
+[ "x" = "x$RABBITMQ_USE_LONGNAME" ] && RABBITMQ_USE_LONGNAME=${USE_LONGNAME}
+if [ "xtrue" = "x$RABBITMQ_USE_LONGNAME" ] ; then
+    RABBITMQ_NAME_TYPE=-name
+    [ "x" = "x$HOSTNAME" ] && HOSTNAME=`env hostname -f`
+    [ "x" = "x$NODENAME" ] && NODENAME=rabbit@${HOSTNAME}
+else
+    RABBITMQ_NAME_TYPE=-sname
+    [ "x" = "x$HOSTNAME" ] && HOSTNAME=`env hostname`
+    [ "x" = "x$NODENAME" ] && NODENAME=rabbit@${HOSTNAME%%.*}
+fi
+
+##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+
+rmq_realpath() {
+    local path=$1
+
+    if [ -d "$path" ]; then
+        cd "$path" && pwd
+    elif [ -f "$path" ]; then
+        cd "$(dirname "$path")" && echo $(pwd)/$(basename "$path")
+    else
+        echo "$path"
+    fi
+}
+
+rmq_check_if_shared_with_mnesia() {
+    local var
+
+    local mnesia_dir=$(rmq_realpath "${RABBITMQ_MNESIA_DIR}")
+    local prefix="WARNING:"
+
+    for var in "$@"; do
+        local dir=$(eval "echo \"\$$var\"")
+
+        case $(rmq_realpath "$dir") in
+        ${mnesia_dir})
+            warning=1
+            echo "$prefix $var is equal to RABBITMQ_MNESIA_DIR" 1>&2
+            ;;
+        ${mnesia_dir}/*)
+            warning=1
+            echo "$prefix $var is located inside RABBITMQ_MNESIA_DIR" 1>&2
+            ;;
+        esac
+
+        if [ "x$warning" = "x1" ]; then
+            prefix="        "
+        fi
+    done
+
+    if [ "x$warning" = "x1" ]; then
+        echo "$prefix => Auto-clustering will not work ('cluster_nodes' in rabbitmq.config)" 1>&2
+    fi
+}
+
+DEFAULT_NODE_IP_ADDRESS=auto
+DEFAULT_NODE_PORT=5672
+[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS}
+[ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT}
+
+[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS}
+[ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT}
+
+[ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT}
+[ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${DEFAULT_NODE_PORT} + 20000))
+[ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${RABBITMQ_NODE_PORT} + 20000))
+
+[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
+[ "x" = "x$RABBITMQ_IO_THREAD_POOL_SIZE" ] && RABBITMQ_IO_THREAD_POOL_SIZE=${IO_THREAD_POOL_SIZE}
+[ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+[ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}
+[ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE}
+[ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
+[ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
+[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
+
+[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
+[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
+
+[ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
+
+[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
+[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
+
+[ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
+
+[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
+
+## Log rotation
+[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
+[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
+[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
+[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
+
+[ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
+
+# Check if files and directories non-related to Mnesia are configured
+# to be in $RABBITMQ_MNESIA_DIR. If this is the case, issue a warning
+# because it will prevent auto-clustering from working (the node will be
+# considered non-virgin).
+
+rmq_check_if_shared_with_mnesia \
+    RABBITMQ_CONFIG_FILE \
+    RABBITMQ_LOG_BASE \
+    RABBITMQ_PID_FILE \
+    RABBITMQ_PLUGINS_EXPAND_DIR \
+    RABBITMQ_ENABLED_PLUGINS_FILE \
+    RABBITMQ_PLUGINS_DIR \
+    RABBITMQ_LOGS \
+    RABBITMQ_SASL_LOGS
+
+##--- End of overridden <var_name> variables
+
+# Since we source this elsewhere, don't accidentally stop execution
+true
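The defaulting logic above first sources ${CONF_ENV_FILE} (set to ${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf in rabbitmq-defaults) and then falls back variable by variable, so the conf file uses the names without the RABBITMQ_ prefix. A minimal sketch of such a file; every value is purely illustrative:

    # rabbitmq-env.conf, sourced by the script above; all values are examples
    USE_LONGNAME=true                       # picked up as RABBITMQ_USE_LONGNAME, switches -sname to -name
    NODENAME=rabbit@broker01.example.com
    NODE_PORT=5672
    IO_THREAD_POOL_SIZE=128                 # overrides the default of 64 from rabbitmq-defaults
    # Keep LOG_BASE, PID_FILE and the like outside MNESIA_DIR, or rmq_check_if_shared_with_mnesia will warn.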
diff --git a/rabbitmq-server/scripts/rabbitmq-env.bat b/rabbitmq-server/scripts/rabbitmq-env.bat
new file mode 100755 (executable)
index 0000000..8657f1e
--- /dev/null
@@ -0,0 +1,252 @@
+@echo off
+
+REM Scopes the variables to the current batch file
+REM setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+REM setlocal enabledelayedexpansion
+
+REM SCRIPT_DIR=`dirname $SCRIPT_PATH`
+REM RABBITMQ_HOME="${SCRIPT_DIR}/.."
+set SCRIPT_DIR=%TDP0%
+set RABBITMQ_HOME=%SCRIPT_DIR%..
+
+REM ## Set defaults
+REM . ${SCRIPT_DIR}/rabbitmq-defaults
+call "%SCRIPT_DIR%\rabbitmq-defaults.bat"
+
+REM These common defaults aren't referenced in the batch scripts
+REM ## Common defaults
+REM SERVER_ERL_ARGS="+P 1048576"
+REM 
+REM # warn about old rabbitmq.conf file, if no new one
+REM if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
+REM    [ ! -f ${CONF_ENV_FILE} ] ; then
+REM     echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- "
+REM     echo "location has moved to ${CONF_ENV_FILE}"
+REM fi
+
+REM ERL_ARGS aren't referenced in the batch scripts
+REM Common defaults
+REM set SERVER_ERL_ARGS=+P 1048576
+
+REM ## Get configuration variables from the configure environment file
+REM [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true
+if exist "!RABBITMQ_CONF_ENV_FILE!" (
+       call !RABBITMQ_CONF_ENV_FILE!
+)
+
+REM Check for the short names here too
+if "!RABBITMQ_USE_LONGNAME!"=="" (
+    if "!USE_LONGNAME!"=="" (
+           set RABBITMQ_NAME_TYPE="-sname"
+       )
+)
+
+if "!RABBITMQ_USE_LONGNAME!"=="true" (
+    if "!USE_LONGNAME!"=="true" (
+        set RABBITMQ_NAME_TYPE="-name"
+       )
+)
+
+if "!COMPUTERNAME!"=="" (
+    set COMPUTERNAME=localhost
+)
+
+REM [ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
+if "!RABBITMQ_NODENAME!"=="" (
+    if "!NODENAME!"=="" (
+        set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
+    ) else (
+        set RABBITMQ_NODENAME=!NODENAME!
+    )
+)
+
+REM 
+REM ##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+REM 
+REM DEFAULT_NODE_IP_ADDRESS=auto
+REM DEFAULT_NODE_PORT=5672
+REM [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS}
+REM [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT}
+REM [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS}
+REM [ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT}
+
+REM if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+REM    if not "!RABBITMQ_NODE_PORT!"=="" (
+REM       set RABBITMQ_NODE_IP_ADDRESS=auto
+REM    )
+REM ) else (
+REM    if "!RABBITMQ_NODE_PORT!"=="" (
+REM       set RABBITMQ_NODE_PORT=5672
+REM    )
+REM )
+
+REM DOUBLE CHECK THIS LOGIC
+if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+       if "!NODE_IP_ADDRESS!"=="" (
+               set RABBITMQ_NODE_IP_ADDRESS=auto
+       ) else (
+               set RABBITMQ_NODE_IP_ADDRESS=!NODE_IP_ADDRESS!
+       )
+)
+
+if "!RABBITMQ_NODE_PORT!"=="" (
+       if "!NODE_PORT!"=="" (
+               set RABBITMQ_NODE_PORT=5672
+       ) else (
+               set RABBITMQ_NODE_PORT=!NODE_PORT!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT}
+REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${DEFAULT_NODE_PORT} + 20000))
+REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${RABBITMQ_NODE_PORT} + 20000))
+
+if "!RABBITMQ_DIST_PORT!"=="" (
+       if "!DIST_PORT!"=="" (
+          if "!RABBITMQ_NODE_PORT!"=="" (
+                 set RABBITMQ_DIST_PORT=25672
+          ) else (
+                 set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
+          )
+   ) else (
+               set RABBITMQ_DIST_PORT=!DIST_PORT!
+   )
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+REM No Windows equivalent
+
+REM [ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}
+if "!RABBITMQ_CONFIG_FILE!"=="" (
+       if "!CONFIG_FILE!"=="" (
+               set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
+       ) else (
+               set RABBITMQ_CONFIG_FILE=!CONFIG_FILE!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE}
+if "!RABBITMQ_LOG_BASE!"=="" (
+       if "!LOG_BASE!"=="" (
+               set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!\log
+       ) else (
+               set RABBITMQ_LOG_BASE=!LOG_BASE!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
+if "!RABBITMQ_MNESIA_BASE!"=="" (
+       if "!MNESIA_BASE!"=="" (
+               set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!\db
+       ) else (
+               set RABBITMQ_MNESIA_BASE=!MNESIA_BASE!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+REM No Windows equivalent 
+
+REM [ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+REM No Windows equivalent
+
+REM [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
+REM [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
+if "!RABBITMQ_MNESIA_DIR!"=="" (
+       if "!MNESIA_DIR!"=="" (
+               set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
+       ) else (
+               set RABBITMQ_MNESIA_DIR=!MNESIA_DIR!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
+REM [ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
+REM No Windows equivalent
+
+REM [ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
+if "!RABBITMQ_BOOT_MODULE!"=="" (
+       if "!BOOT_MODULE!"=="" (
+               set RABBITMQ_BOOT_MODULE=rabbit
+       ) else (
+               set RABBITMQ_BOOT_MODULE=!BOOT_MODULE!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
+REM [ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
+if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
+       if "!PLUGINS_EXPAND_DIR!"=="" (
+               set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
+       ) else (
+               set RABBITMQ_PLUGINS_EXPAND_DIR=!PLUGINS_EXPAND_DIR!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
+if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
+       if "!ENABLED_PLUGINS_FILE!"=="" (
+               set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
+       ) else (
+               set RABBITMQ_ENABLED_PLUGINS_FILE=!ENABLED_PLUGINS_FILE!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
+if "!RABBITMQ_PLUGINS_DIR!"=="" (
+       if "!PLUGINS_DIR!"=="" (
+               set RABBITMQ_PLUGINS_DIR=!RABBITMQ_BASE!\plugins
+       ) else (
+               set RABBITMQ_PLUGINS_DIR=!PLUGINS_DIR!
+       )
+)
+
+REM ## Log rotation
+REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
+REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
+if "!RABBITMQ_LOGS!"=="" (
+       if "!LOGS!"=="" (
+               set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
+       ) else (
+               set LOGS=!LOGS!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
+REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
+if "!RABBITMQ_SASL_LOGS!"=="" (
+       if "!SASL_LOGS!"=="" (
+               set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
+       ) else (
+               set SASL_LOGS=!SASL_LOGS!
+       )
+)
+
+REM [ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
+if "!$RABBITMQ_CTL_ERL_ARGS!"=="" (
+       if not "!CTL_ERL_ARGS!"=="" (
+               set RABBITMQ_CTL_ERL_ARGS=!CTL_ERL_ARGS!
+       )
+)
+
+REM ADDITIONAL WINDOWS ONLY CONFIG ITEMS
+REM rabbitmq-plugins.bat
+REM if "!RABBITMQ_SERVICENAME!"=="" (
+REM     set RABBITMQ_SERVICENAME=RabbitMQ
+REM )
+
+if "!RABBITMQ_SERVICENAME!"=="" (
+       if "!SERVICENAME!"=="" (
+               set RABBITMQ_SERVICENAME=RabbitMQ
+       ) else (
+               set RABBITMQ_SERVICENAME=!SERVICENAME!
+       )
+)
+REM ##--- End of overridden <var_name> variables
+REM 
+REM # Since we source this elsewhere, don't accidentally stop execution
+REM true
index bd7d0b6ab244878e82398beb50977c65da2c0f91..fb88bcc68d325b50afd7d95e6f15a0d5af38eb10 100755 (executable)
 ##  The Original Code is RabbitMQ.
 ##
 ##  The Initial Developer of the Original Code is GoPivotal, Inc.
-##  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+##  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 ##
 
 # Get default settings with user overrides for (RABBITMQ_)<var_name>
 # Non-empty defaults should be set in rabbitmq-env
 . `dirname $0`/rabbitmq-env
 
-##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-
-[ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
-[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
-
-##--- End of overridden <var_name> variables
-
+RABBITMQ_USE_LONGNAME=${RABBITMQ_USE_LONGNAME} \
 exec ${ERL_DIR}erl \
     -pa "${RABBITMQ_HOME}/ebin" \
     -noinput \
     -hidden \
-    -sname rabbitmq-plugins$$ \
+    ${RABBITMQ_PLUGINS_ERL_ARGS} \
     -boot "${CLEAN_BOOT_FILE}" \
     -s rabbit_plugins_main \
     -enabled_plugins_file "$RABBITMQ_ENABLED_PLUGINS_FILE" \
     -plugins_dist_dir "$RABBITMQ_PLUGINS_DIR" \
+    -nodename $RABBITMQ_NODENAME \
     -extra "$@"
index 1052665515b8885e83b01b3ec5bf76f8ea36535f..6fb2f4f54681c7c728b0dc3a106a802d2e21fa33 100755 (executable)
@@ -1,57 +1,55 @@
-@echo off
-REM  The contents of this file are subject to the Mozilla Public License
-REM  Version 1.1 (the "License"); you may not use this file except in
-REM  compliance with the License. You may obtain a copy of the License
-REM  at http://www.mozilla.org/MPL/
-REM
-REM  Software distributed under the License is distributed on an "AS IS"
-REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM  the License for the specific language governing rights and
-REM  limitations under the License.
-REM
-REM  The Original Code is RabbitMQ.
-REM
-REM  The Initial Developer of the Original Code is GoPivotal, Inc.
-REM  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TDP0=%~dp0
-set STAR=%*
-setlocal enabledelayedexpansion
-
-if "!RABBITMQ_SERVICENAME!"=="" (
-    set RABBITMQ_SERVICENAME=RabbitMQ
-)
-
-if "!RABBITMQ_BASE!"=="" (
-    set RABBITMQ_BASE=!APPDATA!\!RABBITMQ_SERVICENAME!
-)
-
-if not exist "!ERLANG_HOME!\bin\erl.exe" (
-    echo.
-    echo ******************************
-    echo ERLANG_HOME not set correctly.
-    echo ******************************
-    echo.
-    echo Please either set ERLANG_HOME to point to your Erlang installation or place the
-    echo RabbitMQ server distribution in the Erlang lib folder.
-    echo.
-    exit /B
-)
-
-if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
-    set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
-)
-
-if "!RABBITMQ_PLUGINS_DIR!"=="" (
-    set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
-)
-
-"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden -sname rabbitmq-plugins!RANDOM!!TIME:~9! -s rabbit_plugins_main -enabled_plugins_file "!RABBITMQ_ENABLED_PLUGINS_FILE!" -plugins_dist_dir "!RABBITMQ_PLUGINS_DIR:\=/!" -extra !STAR!
-
-endlocal
-endlocal
+@echo off\r
+\r
+REM  The contents of this file are subject to the Mozilla Public License\r
+REM  Version 1.1 (the "License"); you may not use this file except in\r
+REM  compliance with the License. You may obtain a copy of the License\r
+REM  at http://www.mozilla.org/MPL/\r
+REM\r
+REM  Software distributed under the License is distributed on an "AS IS"\r
+REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See\r
+REM  the License for the specific language governing rights and\r
+REM  limitations under the License.\r
+REM\r
+REM  The Original Code is RabbitMQ.\r
+REM\r
+REM  The Initial Developer of the Original Code is GoPivotal, Inc.\r
+REM  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.\r
+REM\r
+\r
+setlocal\r
+\r
+rem Preserve values that might contain exclamation marks before\r
+rem enabling delayed expansion\r
+set TDP0=%~dp0\r
+set STAR=%*\r
+setlocal enabledelayedexpansion\r
+\r
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>\r
+REM Non-empty defaults should be set in rabbitmq-env\r
+call "!TDP0!\rabbitmq-env.bat"\r
+\r
+if not exist "!ERLANG_HOME!\bin\erl.exe" (\r
+    echo.\r
+    echo ******************************\r
+    echo ERLANG_HOME not set correctly.\r
+    echo ******************************\r
+    echo.\r
+    echo Please either set ERLANG_HOME to point to your Erlang installation or place the\r
+    echo RabbitMQ server distribution in the Erlang lib folder.\r
+    echo.\r
+    exit /B 1\r
+)\r
+\r
+"!ERLANG_HOME!\bin\erl.exe" ^\r
+-pa "!TDP0!..\ebin" ^\r
+-noinput ^\r
+-hidden ^\r
+!RABBITMQ_CTL_ERL_ARGS! ^\r
+-s rabbit_plugins_main ^\r
+-enabled_plugins_file "!RABBITMQ_ENABLED_PLUGINS_FILE!" ^\r
+-plugins_dist_dir "!RABBITMQ_PLUGINS_DIR:\=/!" ^\r
+-nodename !RABBITMQ_NODENAME! ^\r
+-extra !STAR!\r
+\r
+endlocal\r
+endlocal\r
index bd397441a9c49f07a3b3f10f71b3126bbbefe6f4..1800b8713c1e4fb4b0778b1f1649cd82db65c70a 100755 (executable)
 ##  The Original Code is RabbitMQ.
 ##
 ##  The Initial Developer of the Original Code is GoPivotal, Inc.
-##  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+##  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 ##
 
 # Get default settings with user overrides for (RABBITMQ_)<var_name>
 # Non-empty defaults should be set in rabbitmq-env
 . `dirname $0`/rabbitmq-env
 
-##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-
-DEFAULT_NODE_IP_ADDRESS=auto
-DEFAULT_NODE_PORT=5672
-[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS}
-[ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT}
-
-[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS}
-[ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT}
-
-[ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT}
-[ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${DEFAULT_NODE_PORT} + 20000))
-[ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${RABBITMQ_NODE_PORT} + 20000))
-
-[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
-[ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
-[ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}
-[ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE}
-[ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
-[ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
-
-[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
-[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
-
-[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
-[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
-
-[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
-[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
-
-[ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
-
-[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
-
-## Log rotation
-[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
-[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
-[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
-[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
-
-##--- End of overridden <var_name> variables
-
 RABBITMQ_START_RABBIT=
 [ "x" = "x$RABBITMQ_ALLOW_INPUT" ] && RABBITMQ_START_RABBIT=" -noinput"
-[ "x" = "x$RABBITMQ_NODE_ONLY" ] && RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -s rabbit boot "
+[ "x" = "x$RABBITMQ_NODE_ONLY" ] && RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -s $RABBITMQ_BOOT_MODULE boot "
 
 case "$(uname -s)" in
   CYGWIN*) # we make no attempt to record the cygwin pid; rabbitmqctl wait
@@ -96,7 +54,7 @@ RABBITMQ_DIST_PORT=$RABBITMQ_DIST_PORT \
     -noinput \
     -hidden \
     -s rabbit_prelaunch \
-    -sname rabbitmqprelaunch$$ \
+    ${RABBITMQ_NAME_TYPE} rabbitmqprelaunch$$ \
     -extra "${RABBITMQ_NODENAME}"
 
 PRELAUNCH_RESULT=$?
@@ -118,24 +76,47 @@ RABBITMQ_CONFIG_ARG=
 RABBITMQ_LISTEN_ARG=
 [ "x" != "x$RABBITMQ_NODE_PORT" ] && [ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_LISTEN_ARG="-rabbit tcp_listeners [{\""${RABBITMQ_NODE_IP_ADDRESS}"\","${RABBITMQ_NODE_PORT}"}]"
 
+# If $RABBITMQ_LOGS is '-', send all log messages to stdout. Likewise
+# for RABBITMQ_SASL_LOGS. This is particularly useful for Docker
+# images.
+
+if [ "$RABBITMQ_LOGS" = '-' ]; then
+    RABBIT_ERROR_LOGGER='tty'
+else
+    RABBIT_ERROR_LOGGER='{file,"'${RABBITMQ_LOGS}'"}'
+fi
+
+if [ "$RABBITMQ_SASL_LOGS" = '-' ]; then
+    SASL_ERROR_LOGGER=tty
+    RABBIT_SASL_ERROR_LOGGER='tty'
+else
+    SASL_ERROR_LOGGER=false
+    RABBIT_SASL_ERROR_LOGGER='{file,"'${RABBITMQ_SASL_LOGS}'"}'
+fi
+
 # we need to turn off path expansion because some of the vars, notably
 # RABBITMQ_SERVER_ERL_ARGS, contain terms that look like globs and
 # there is no other way of preventing their expansion.
 set -f
 
+RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
 exec ${ERL_DIR}erl \
     -pa ${RABBITMQ_EBIN_ROOT} \
     ${RABBITMQ_START_RABBIT} \
-    -sname ${RABBITMQ_NODENAME} \
+    ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \
     -boot "${SASL_BOOT_FILE}" \
     ${RABBITMQ_CONFIG_ARG} \
     +W w \
+    +A ${RABBITMQ_IO_THREAD_POOL_SIZE} \
     ${RABBITMQ_SERVER_ERL_ARGS} \
+    +K true \
+    -kernel inet_default_connect_options "[{nodelay,true}]" \
+    ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \
     ${RABBITMQ_LISTEN_ARG} \
     -sasl errlog_type error \
-    -sasl sasl_error_logger false \
-    -rabbit error_logger '{file,"'${RABBITMQ_LOGS}'"}' \
-    -rabbit sasl_error_logger '{file,"'${RABBITMQ_SASL_LOGS}'"}' \
+    -sasl sasl_error_logger "$SASL_ERROR_LOGGER" \
+    -rabbit error_logger "$RABBIT_ERROR_LOGGER" \
+    -rabbit sasl_error_logger "$RABBIT_SASL_ERROR_LOGGER" \
     -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \
     -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \
     -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \
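As the comment above notes, setting RABBITMQ_LOGS to '-' (and likewise RABBITMQ_SASL_LOGS) switches the error loggers to tty so that all log output goes to stdout. A minimal usage sketch, e.g. for a container that captures stdout; the invocation itself is illustrative:

    # send both the main and SASL logs to stdout instead of files
    RABBITMQ_LOGS=- RABBITMQ_SASL_LOGS=- rabbitmq-server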
index 3f195c7b7342021989f09b4b4e0b642e9dab3b66..8f75a486ec2a6a58dff4e1e8c6f57353f9d1c443 100755 (executable)
-@echo off
-REM  The contents of this file are subject to the Mozilla Public License
-REM  Version 1.1 (the "License"); you may not use this file except in
-REM  compliance with the License. You may obtain a copy of the License
-REM  at http://www.mozilla.org/MPL/
-REM
-REM  Software distributed under the License is distributed on an "AS IS"
-REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM  the License for the specific language governing rights and
-REM  limitations under the License.
-REM
-REM  The Original Code is RabbitMQ.
-REM
-REM  The Initial Developer of the Original Code is GoPivotal, Inc.
-REM  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TDP0=%~dp0
-set STAR=%*
-setlocal enabledelayedexpansion
-
-if "!RABBITMQ_BASE!"=="" (
-    set RABBITMQ_BASE=!APPDATA!\RabbitMQ
-)
-
-if "!COMPUTERNAME!"=="" (
-    set COMPUTERNAME=localhost
-)
-
-if "!RABBITMQ_NODENAME!"=="" (
-    set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
-)
-
-if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
-   if not "!RABBITMQ_NODE_PORT!"=="" (
-      set RABBITMQ_NODE_IP_ADDRESS=auto
-   )
-) else (
-   if "!RABBITMQ_NODE_PORT!"=="" (
-      set RABBITMQ_NODE_PORT=5672
-   )
-)
-
-if "!RABBITMQ_DIST_PORT!"=="" (
-   if "!RABBITMQ_NODE_PORT!"=="" (
-      set RABBITMQ_DIST_PORT=25672
-   ) else (
-      set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
-   )
-)
-
-if not exist "!ERLANG_HOME!\bin\erl.exe" (
-    echo.
-    echo ******************************
-    echo ERLANG_HOME not set correctly.
-    echo ******************************
-    echo.
-    echo Please either set ERLANG_HOME to point to your Erlang installation or place the
-    echo RabbitMQ server distribution in the Erlang lib folder.
-    echo.
-    exit /B
-)
-
-if "!RABBITMQ_MNESIA_BASE!"=="" (
-    set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!/db
-)
-if "!RABBITMQ_LOG_BASE!"=="" (
-    set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!/log
-)
-
-
-rem We save the previous logs in their respective backup
-rem Log management (rotation, filtering based of size...) is left as an exercice for the user.
-
-set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
-set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
-
-rem End of log management
-
-
-if "!RABBITMQ_MNESIA_DIR!"=="" (
-    set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
-)
-
-if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
-    set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
-)
-
-if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
-    set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
-)
-
-if "!RABBITMQ_PLUGINS_DIR!"=="" (
-    set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
-)
-
-set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin
-
-"!ERLANG_HOME!\bin\erl.exe" ^
-        -pa "!RABBITMQ_EBIN_ROOT!" ^
-        -noinput -hidden ^
-        -s rabbit_prelaunch ^
-        -sname rabbitmqprelaunch!RANDOM!!TIME:~9! ^
-        -extra "!RABBITMQ_NODENAME!"
-
-if ERRORLEVEL 2 (
-    rem dist port mentioned in config, do not attempt to set it
-) else if ERRORLEVEL 1 (
-    exit /B 1
-) else (
-    set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
-)
-
-set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!"
-
-if "!RABBITMQ_CONFIG_FILE!"=="" (
-    set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
-)
-
-if exist "!RABBITMQ_CONFIG_FILE!.config" (
-    set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"
-) else (
-    set RABBITMQ_CONFIG_ARG=
-)
-
-set RABBITMQ_LISTEN_ARG=
-if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
-   if not "!RABBITMQ_NODE_PORT!"=="" (
-      set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners [{\""!RABBITMQ_NODE_IP_ADDRESS!"\","!RABBITMQ_NODE_PORT!"}]
-   )
-)
-
-"!ERLANG_HOME!\bin\erl.exe" ^
--pa "!RABBITMQ_EBIN_ROOT!" ^
--noinput ^
--boot start_sasl ^
--s rabbit boot ^
-!RABBITMQ_CONFIG_ARG! ^
--sname !RABBITMQ_NODENAME! ^
-+W w ^
-+A30 ^
-+P 1048576 ^
--kernel inet_default_connect_options "[{nodelay, true}]" ^
-!RABBITMQ_LISTEN_ARG! ^
-!RABBITMQ_SERVER_ERL_ARGS! ^
--sasl errlog_type error ^
--sasl sasl_error_logger false ^
--rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
--rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
--rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
--rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
--rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
--os_mon start_cpu_sup false ^
--os_mon start_disksup false ^
--os_mon start_memsup false ^
--mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^
-!RABBITMQ_SERVER_START_ARGS! ^
-!RABBITMQ_DIST_ARG! ^
-!STAR!
-
-endlocal
-endlocal
+@echo off\r
+REM  The contents of this file are subject to the Mozilla Public License\r
+REM  Version 1.1 (the "License"); you may not use this file except in\r
+REM  compliance with the License. You may obtain a copy of the License\r
+REM  at http://www.mozilla.org/MPL/\r
+REM\r
+REM  Software distributed under the License is distributed on an "AS IS"\r
+REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See\r
+REM  the License for the specific language governing rights and\r
+REM  limitations under the License.\r
+REM\r
+REM  The Original Code is RabbitMQ.\r
+REM\r
+REM  The Initial Developer of the Original Code is GoPivotal, Inc.\r
+REM  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.\r
+REM\r
+\r
+setlocal\r
+\r
+rem Preserve values that might contain exclamation marks before\r
+rem enabling delayed expansion\r
+set TDP0=%~dp0\r
+set STAR=%*\r
+setlocal enabledelayedexpansion\r
+\r
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>\r
+REM Non-empty defaults should be set in rabbitmq-env\r
+call "%TDP0%\rabbitmq-env.bat"\r
+\r
+if not exist "!ERLANG_HOME!\bin\erl.exe" (\r
+    echo.\r
+    echo ******************************\r
+    echo ERLANG_HOME not set correctly.\r
+    echo ******************************\r
+    echo.\r
+    echo Please either set ERLANG_HOME to point to your Erlang installation or place the\r
+    echo RabbitMQ server distribution in the Erlang lib folder.\r
+    echo.\r
+    exit /B 1\r
+)\r
+\r
+set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin\r
+\r
+"!ERLANG_HOME!\bin\erl.exe" ^\r
+        -pa "!RABBITMQ_EBIN_ROOT!" ^\r
+        -noinput -hidden ^\r
+        -s rabbit_prelaunch ^\r
+        !RABBITMQ_NAME_TYPE! rabbitmqprelaunch!RANDOM!!TIME:~9! ^\r
+        -extra "!RABBITMQ_NODENAME!"\r
+\r
+if ERRORLEVEL 2 (\r
+    rem dist port mentioned in config, do not attempt to set it\r
+) else if ERRORLEVEL 1 (\r
+    exit /B 1\r
+) else (\r
+    set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!\r
+)\r
+\r
+set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!"\r
+\r
+if exist "!RABBITMQ_CONFIG_FILE!.config" (\r
+    set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"\r
+) else (\r
+    set RABBITMQ_CONFIG_ARG=\r
+)\r
+\r
+set RABBITMQ_LISTEN_ARG=\r
+if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (\r
+   if not "!RABBITMQ_NODE_PORT!"=="" (\r
+      set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners [{\""!RABBITMQ_NODE_IP_ADDRESS!"\","!RABBITMQ_NODE_PORT!"}]\r
+   )\r
+)\r
+\r
+set RABBITMQ_START_RABBIT=\r
+if "!RABBITMQ_NODE_ONLY!"=="" (\r
+    set RABBITMQ_START_RABBIT=-s "!RABBITMQ_BOOT_MODULE!" boot\r
+)\r
+\r
+if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (\r
+    set RABBITMQ_IO_THREAD_POOL_SIZE=30\r
+)\r
+\r
+"!ERLANG_HOME!\bin\erl.exe" ^\r
+-pa "!RABBITMQ_EBIN_ROOT!" ^\r
+-noinput ^\r
+-boot start_sasl ^\r
+!RABBITMQ_START_RABBIT! ^\r
+!RABBITMQ_CONFIG_ARG! ^\r
+!RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^\r
++W w ^\r
++A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^\r
++P 1048576 ^\r
+!RABBITMQ_LISTEN_ARG! ^\r
+!RABBITMQ_SERVER_ERL_ARGS! ^\r
+-kernel inet_default_connect_options "[{nodelay, true}]" ^\r
+!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^\r
+-sasl errlog_type error ^\r
+-sasl sasl_error_logger false ^\r
+-rabbit error_logger {file,\""!LOGS:\=/!"\"} ^\r
+-rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^\r
+-rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^\r
+-rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^\r
+-rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^\r
+-os_mon start_cpu_sup false ^\r
+-os_mon start_disksup false ^\r
+-os_mon start_memsup false ^\r
+-mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^\r
+!RABBITMQ_SERVER_START_ARGS! ^\r
+!RABBITMQ_DIST_ARG! ^\r
+!STAR!\r
+\r
+endlocal\r
+endlocal\r
index 3b1b127b3796f485c5b494b933ebe2ec5386c9bd..d6dd902ee9c82af1c39fbe43609e8e1797cba155 100755 (executable)
-@echo off
-REM  The contents of this file are subject to the Mozilla Public License
-REM  Version 1.1 (the "License"); you may not use this file except in
-REM  compliance with the License. You may obtain a copy of the License
-REM  at http://www.mozilla.org/MPL/
-REM
-REM  Software distributed under the License is distributed on an "AS IS"
-REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM  the License for the specific language governing rights and
-REM  limitations under the License.
-REM
-REM  The Original Code is RabbitMQ.
-REM
-REM  The Initial Developer of the Original Code is GoPivotal, Inc.
-REM  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TN0=%~n0
-set TDP0=%~dp0
-set P1=%1
-setlocal enabledelayedexpansion
-
-set STARVAR=
-shift
-:loop1
-if "%1"=="" goto after_loop
-       set STARVAR=%STARVAR% %1
-       shift
-goto loop1
-:after_loop
-
-if "!RABBITMQ_SERVICENAME!"=="" (
-    set RABBITMQ_SERVICENAME=RabbitMQ
-)
-
-if "!RABBITMQ_BASE!"=="" (
-    set RABBITMQ_BASE=!APPDATA!\!RABBITMQ_SERVICENAME!
-)
-
-if "!COMPUTERNAME!"=="" (
-    set COMPUTERNAME=localhost
-)
-
-if "!RABBITMQ_NODENAME!"=="" (
-    set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
-)
-
-if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
-    if not "!RABBITMQ_NODE_PORT!"=="" (
-       set RABBITMQ_NODE_IP_ADDRESS=auto
-    )
-) else (
-    if "!RABBITMQ_NODE_PORT!"=="" (
-       set RABBITMQ_NODE_PORT=5672
-    )
-)
-
-if "!RABBITMQ_DIST_PORT!"=="" (
-   if "!RABBITMQ_NODE_PORT!"=="" (
-      set RABBITMQ_DIST_PORT=25672
-   ) else (
-      set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
-   )
-)
-
-if "!ERLANG_SERVICE_MANAGER_PATH!"=="" (
-    if not exist "!ERLANG_HOME!\bin\erl.exe" (
-        echo.
-        echo ******************************
-        echo ERLANG_HOME not set correctly.
-        echo ******************************
-        echo.
-        echo Please either set ERLANG_HOME to point to your Erlang installation or place the
-        echo RabbitMQ server distribution in the Erlang lib folder.
-        echo.
-        exit /B
-    )
-    for /f "delims=" %%i in ('dir /ad/b "!ERLANG_HOME!"') do if exist "!ERLANG_HOME!\%%i\bin\erlsrv.exe" (
-        set ERLANG_SERVICE_MANAGER_PATH=!ERLANG_HOME!\%%i\bin
-    )
-)
-
-set CONSOLE_FLAG=
-set CONSOLE_LOG_VALID=
-for %%i in (new reuse) do if "%%i" == "!RABBITMQ_CONSOLE_LOG!" set CONSOLE_LOG_VALID=TRUE
-if "!CONSOLE_LOG_VALID!" == "TRUE" (
-    set CONSOLE_FLAG=-debugtype !RABBITMQ_CONSOLE_LOG!
-)
-
-rem *** End of configuration ***
-
-if not exist "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" (
-    echo.
-    echo **********************************************
-    echo ERLANG_SERVICE_MANAGER_PATH not set correctly.
-    echo **********************************************
-    echo.
-    echo "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" not found
-    echo Please set ERLANG_SERVICE_MANAGER_PATH to the folder containing "erlsrv.exe".
-    echo.
-    exit /B 1
-)
-
-if "!RABBITMQ_MNESIA_BASE!"=="" (
-    set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!/db
-)
-if "!RABBITMQ_LOG_BASE!"=="" (
-    set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!/log
-)
-
-
-rem We save the previous logs in their respective backup
-rem Log management (rotation, filtering based on size...) is left as an exercise for the user.
-
-set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
-set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
-
-rem End of log management
-
-
-if "!RABBITMQ_MNESIA_DIR!"=="" (
-    set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
-)
-
-if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
-    set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
-)
-
-if "!P1!" == "install" goto INSTALL_SERVICE
-for %%i in (start stop disable enable list remove) do if "%%i" == "!P1!" goto MODIFY_SERVICE
-
-echo.
-echo *********************
-echo Service control usage
-echo *********************
-echo.
-echo !TN0! help    - Display this help
-echo !TN0! install - Install the !RABBITMQ_SERVICENAME! service
-echo !TN0! remove  - Remove the !RABBITMQ_SERVICENAME! service
-echo.
-echo The following actions can also be accomplished by using
-echo Windows Services Management Console (services.msc):
-echo.
-echo !TN0! start   - Start the !RABBITMQ_SERVICENAME! service
-echo !TN0! stop    - Stop the !RABBITMQ_SERVICENAME! service
-echo !TN0! disable - Disable the !RABBITMQ_SERVICENAME! service
-echo !TN0! enable  - Enable the !RABBITMQ_SERVICENAME! service
-echo.
-exit /B
-
-
-:INSTALL_SERVICE
-
-if not exist "!RABBITMQ_BASE!" (
-    echo Creating base directory !RABBITMQ_BASE! & md "!RABBITMQ_BASE!"
-)
-
-"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" list !RABBITMQ_SERVICENAME! 2>NUL 1>NUL
-if errorlevel 1 (
-    "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" add !RABBITMQ_SERVICENAME! -internalservicename !RABBITMQ_SERVICENAME!
-) else (
-    echo !RABBITMQ_SERVICENAME! service is already present - only updating service parameters
-)
-
-if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
-    set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
-)
-
-if "!RABBITMQ_PLUGINS_DIR!"=="" (
-    set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
-)
-
-set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin
-
-if "!RABBITMQ_CONFIG_FILE!"=="" (
-    set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
-)
-
-"!ERLANG_HOME!\bin\erl.exe" ^
-        -pa "!RABBITMQ_EBIN_ROOT!" ^
-        -noinput -hidden ^
-        -s rabbit_prelaunch ^
-        -sname rabbitmqprelaunch!RANDOM!!TIME:~9!
-
-if ERRORLEVEL 3 (
-    rem ERRORLEVEL means (or greater) so we need to catch all other failure
-    rem cases here
-    exit /B 1
-) else if ERRORLEVEL 2 (
-    rem dist port mentioned in config, do not attempt to set it
-) else if ERRORLEVEL 1 (
-    exit /B 1
-) else (
-    set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
-)
-
-if exist "!RABBITMQ_CONFIG_FILE!.config" (
-    set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"
-) else (
-    set RABBITMQ_CONFIG_ARG=
-)
-
-set RABBITMQ_LISTEN_ARG=
-if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
-   if not "!RABBITMQ_NODE_PORT!"=="" (
-      set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners "[{\"!RABBITMQ_NODE_IP_ADDRESS!\", !RABBITMQ_NODE_PORT!}]"
-   )
-)
-
-set ERLANG_SERVICE_ARGUMENTS= ^
--pa "!RABBITMQ_EBIN_ROOT!" ^
--boot start_sasl ^
--s rabbit boot ^
-!RABBITMQ_CONFIG_ARG! ^
-+W w ^
-+A30 ^
-+P 1048576 ^
--kernel inet_default_connect_options "[{nodelay,true}]" ^
-!RABBITMQ_LISTEN_ARG! ^
-!RABBITMQ_SERVER_ERL_ARGS! ^
--sasl errlog_type error ^
--sasl sasl_error_logger false ^
--rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
--rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
--rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
--rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
--rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
--rabbit windows_service_config \""!RABBITMQ_CONFIG_FILE:\=/!"\" ^
--os_mon start_cpu_sup false ^
--os_mon start_disksup false ^
--os_mon start_memsup false ^
--mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^
-!RABBITMQ_SERVER_START_ARGS! ^
-!RABBITMQ_DIST_ARG! ^
-!STARVAR!
-
-set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!
-set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!
-
-"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^
--machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^
--env ERL_CRASH_DUMP="!RABBITMQ_BASE:\=/!/erl_crash.dump" ^
--workdir "!RABBITMQ_BASE!" ^
--stopaction "rabbit:stop_and_halt()." ^
--sname !RABBITMQ_NODENAME! ^
-!CONSOLE_FLAG! ^
--comment "A robust and scalable messaging broker" ^
--args "!ERLANG_SERVICE_ARGUMENTS!" > NUL
-
-goto END
-
-
-:MODIFY_SERVICE
-
-"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" !P1! !RABBITMQ_SERVICENAME!
-goto END
-
-
-:END
-
-endlocal
-endlocal
+@echo off\r
+REM  The contents of this file are subject to the Mozilla Public License\r
+REM  Version 1.1 (the "License"); you may not use this file except in\r
+REM  compliance with the License. You may obtain a copy of the License\r
+REM  at http://www.mozilla.org/MPL/\r
+REM\r
+REM  Software distributed under the License is distributed on an "AS IS"\r
+REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See\r
+REM  the License for the specific language governing rights and\r
+REM  limitations under the License.\r
+REM\r
+REM  The Original Code is RabbitMQ.\r
+REM\r
+REM  The Initial Developer of the Original Code is GoPivotal, Inc.\r
+REM  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.\r
+REM\r
+\r
+setlocal\r
+\r
+rem Preserve values that might contain exclamation marks before\r
+rem enabling delayed expansion\r
+set TN0=%~n0\r
+set TDP0=%~dp0\r
+set P1=%1\r
+setlocal enabledelayedexpansion\r
+\r
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>\r
+REM Non-empty defaults should be set in rabbitmq-env\r
+call "%TDP0%\rabbitmq-env.bat"\r
+\r
+set STARVAR=\r
+shift\r
+:loop1\r
+if "%1"=="" goto after_loop\r
+       set STARVAR=%STARVAR% %1\r
+       shift\r
+goto loop1\r
+:after_loop\r
+\r
+if "!ERLANG_SERVICE_MANAGER_PATH!"=="" (\r
+    if not exist "!ERLANG_HOME!\bin\erl.exe" (\r
+        echo.\r
+        echo ******************************\r
+        echo ERLANG_HOME not set correctly.\r
+        echo ******************************\r
+        echo.\r
+        echo Please either set ERLANG_HOME to point to your Erlang installation or place the\r
+        echo RabbitMQ server distribution in the Erlang lib folder.\r
+        echo.\r
+        exit /B\r
+    )\r
+    for /f "delims=" %%i in ('dir /ad/b "!ERLANG_HOME!"') do if exist "!ERLANG_HOME!\%%i\bin\erlsrv.exe" (\r
+        set ERLANG_SERVICE_MANAGER_PATH=!ERLANG_HOME!\%%i\bin\r
+    )\r
+)\r
+\r
+set CONSOLE_FLAG=\r
+set CONSOLE_LOG_VALID=\r
+for %%i in (new reuse) do if "%%i" == "!RABBITMQ_CONSOLE_LOG!" set CONSOLE_LOG_VALID=TRUE\r
+if "!CONSOLE_LOG_VALID!" == "TRUE" (\r
+    set CONSOLE_FLAG=-debugtype !RABBITMQ_CONSOLE_LOG!\r
+)\r
+\r
+rem *** End of configuration ***\r
+\r
+if not exist "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" (\r
+    echo.\r
+    echo **********************************************\r
+    echo ERLANG_SERVICE_MANAGER_PATH not set correctly.\r
+    echo **********************************************\r
+    echo.\r
+    echo "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" not found\r
+    echo Please set ERLANG_SERVICE_MANAGER_PATH to the folder containing "erlsrv.exe".\r
+    echo.\r
+    exit /B 1\r
+)\r
+\r
+if "!P1!" == "install" goto INSTALL_SERVICE\r
+for %%i in (start stop disable enable list remove) do if "%%i" == "!P1!" goto MODIFY_SERVICE\r
+\r
+echo.\r
+echo *********************\r
+echo Service control usage\r
+echo *********************\r
+echo.\r
+echo !TN0! help    - Display this help\r
+echo !TN0! install - Install the !RABBITMQ_SERVICENAME! service\r
+echo !TN0! remove  - Remove the !RABBITMQ_SERVICENAME! service\r
+echo.\r
+echo The following actions can also be accomplished by using\r
+echo Windows Services Management Console (services.msc):\r
+echo.\r
+echo !TN0! start   - Start the !RABBITMQ_SERVICENAME! service\r
+echo !TN0! stop    - Stop the !RABBITMQ_SERVICENAME! service\r
+echo !TN0! disable - Disable the !RABBITMQ_SERVICENAME! service\r
+echo !TN0! enable  - Enable the !RABBITMQ_SERVICENAME! service\r
+echo.\r
+exit /B\r
+\r
+\r
+:INSTALL_SERVICE\r
+\r
+if not exist "!RABBITMQ_BASE!" (\r
+    echo Creating base directory !RABBITMQ_BASE! & md "!RABBITMQ_BASE!"\r
+)\r
+\r
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" list !RABBITMQ_SERVICENAME! 2>NUL 1>NUL\r
+if errorlevel 1 (\r
+    "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" add !RABBITMQ_SERVICENAME! -internalservicename !RABBITMQ_SERVICENAME!\r
+) else (\r
+    echo !RABBITMQ_SERVICENAME! service is already present - only updating service parameters\r
+)\r
+\r
+set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin\r
+\r
+"!ERLANG_HOME!\bin\erl.exe" ^\r
+        -pa "!RABBITMQ_EBIN_ROOT!" ^\r
+        -noinput -hidden ^\r
+        -s rabbit_prelaunch ^\r
+        !RABBITMQ_NAME_TYPE! rabbitmqprelaunch!RANDOM!!TIME:~9!\r
+\r
+if ERRORLEVEL 3 (\r
+    rem ERRORLEVEL means (or greater) so we need to catch all other failure\r
+    rem cases here\r
+    exit /B 1\r
+) else if ERRORLEVEL 2 (\r
+    rem dist port mentioned in config, do not attempt to set it\r
+) else if ERRORLEVEL 1 (\r
+    exit /B 1\r
+) else (\r
+    set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!\r
+)\r
+\r
+if exist "!RABBITMQ_CONFIG_FILE!.config" (\r
+    set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"\r
+) else (\r
+    set RABBITMQ_CONFIG_ARG=\r
+)\r
+\r
+set RABBITMQ_LISTEN_ARG=\r
+if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (\r
+   if not "!RABBITMQ_NODE_PORT!"=="" (\r
+      set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners "[{\"!RABBITMQ_NODE_IP_ADDRESS!\", !RABBITMQ_NODE_PORT!}]"\r
+   )\r
+)\r
+\r
+set RABBITMQ_START_RABBIT=\r
+if "!RABBITMQ_NODE_ONLY!"=="" (\r
+    set RABBITMQ_START_RABBIT=-s rabbit boot\r
+)\r
+\r
+if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (\r
+    set RABBITMQ_IO_THREAD_POOL_SIZE=30\r
+)\r
+\r
+set ERLANG_SERVICE_ARGUMENTS= ^\r
+-pa "!RABBITMQ_EBIN_ROOT!" ^\r
+-boot start_sasl ^\r
+!RABBITMQ_START_RABBIT! ^\r
+!RABBITMQ_CONFIG_ARG! ^\r
++W w ^\r
++A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^\r
++P 1048576 ^\r
+-kernel inet_default_connect_options "[{nodelay,true}]" ^\r
+!RABBITMQ_LISTEN_ARG! ^\r
+!RABBITMQ_SERVER_ERL_ARGS! ^\r
+-sasl errlog_type error ^\r
+-sasl sasl_error_logger false ^\r
+-rabbit error_logger {file,\""!LOGS:\=/!"\"} ^\r
+-rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^\r
+-rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^\r
+-rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^\r
+-rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^\r
+-rabbit windows_service_config \""!RABBITMQ_CONFIG_FILE:\=/!"\" ^\r
+-os_mon start_cpu_sup false ^\r
+-os_mon start_disksup false ^\r
+-os_mon start_memsup false ^\r
+-mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^\r
+!RABBITMQ_SERVER_START_ARGS! ^\r
+!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^\r
+!RABBITMQ_DIST_ARG! ^\r
+!STARVAR!\r
+\r
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!\r
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!\r
+\r
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^\r
+-machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^\r
+-env ERL_CRASH_DUMP="!RABBITMQ_BASE:\=/!/erl_crash.dump" ^\r
+-workdir "!RABBITMQ_BASE!" ^\r
+-stopaction "rabbit:stop_and_halt()." ^\r
+!RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^\r
+!CONSOLE_FLAG! ^\r
+-comment "A robust and scalable messaging broker" ^\r
+-args "!ERLANG_SERVICE_ARGUMENTS!" > NUL\r
+\r
+goto END\r
+\r
+\r
+:MODIFY_SERVICE\r
+\r
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" !P1! !RABBITMQ_SERVICENAME!\r
+goto END\r
+\r
+\r
+:END\r
+\r
+endlocal\r
+endlocal\r
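
The INSTALL_SERVICE block above runs rabbit_prelaunch and branches on its exit code before deciding whether to pin the Erlang distribution port. As a rough, hypothetical restatement of that convention in Erlang (the function name and error term are invented here; only the 0 / 2 / other split comes from the batch logic above):

    %% 0: prelaunch succeeded, pin the distribution port range;
    %% 2: a dist port is already mentioned in the config file, leave it alone;
    %% anything else: abort the installation.
    dist_arg_for_exit_code(0, Port) ->
        lists:flatten(
          io_lib:format("-kernel inet_dist_listen_min ~b "
                        "-kernel inet_dist_listen_max ~b", [Port, Port]));
    dist_arg_for_exit_code(2, _Port) ->
        "";
    dist_arg_for_exit_code(_Other, _Port) ->
        erlang:error(prelaunch_failed).
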
index 309abf2a0d5ea46a49708195b90baf4da1858443..03f8765e27b6b679a635db088a66181c14200d70 100755 (executable)
 ##  The Original Code is RabbitMQ.
 ##
 ##  The Initial Developer of the Original Code is GoPivotal, Inc.
-##  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+##  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 ##
 
 # Get default settings with user overrides for (RABBITMQ_)<var_name>
 # Non-empty defaults should be set in rabbitmq-env
 . `dirname $0`/rabbitmq-env
 
-##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-
-[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
-[ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
-
-##--- End of overridden <var_name> variables
-
+# We specify Mnesia dir and sasl error logger since some actions
+# (e.g. forget_cluster_node --offline) require us to impersonate the
+# real node.
+RABBITMQ_USE_LONGNAME=${RABBITMQ_USE_LONGNAME} \
 exec ${ERL_DIR}erl \
     -pa "${RABBITMQ_HOME}/ebin" \
     -noinput \
     -hidden \
     ${RABBITMQ_CTL_ERL_ARGS} \
-    -sname rabbitmqctl$$ \
     -boot "${CLEAN_BOOT_FILE}" \
+    -sasl errlog_type error \
+    -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \
     -s rabbit_control_main \
     -nodename $RABBITMQ_NODENAME \
     -extra "$@"
index 313b698cc6b0adf9a177a88e571be3d192e8a2d2..45e2929579fb672e1a9616ed7438e5c68136d8cf 100755 (executable)
@@ -1,49 +1,55 @@
-@echo off
-REM  The contents of this file are subject to the Mozilla Public License
-REM  Version 1.1 (the "License"); you may not use this file except in
-REM  compliance with the License. You may obtain a copy of the License
-REM  at http://www.mozilla.org/MPL/
-REM
-REM  Software distributed under the License is distributed on an "AS IS"
-REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM  the License for the specific language governing rights and
-REM  limitations under the License.
-REM
-REM  The Original Code is RabbitMQ.
-REM
-REM  The Initial Developer of the Original Code is GoPivotal, Inc.
-REM  Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TDP0=%~dp0
-set STAR=%*
-setlocal enabledelayedexpansion
-
-if "!COMPUTERNAME!"=="" (
-    set COMPUTERNAME=localhost
-)
-
-if "!RABBITMQ_NODENAME!"=="" (
-    set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
-)
-
-if not exist "!ERLANG_HOME!\bin\erl.exe" (
-    echo.
-    echo ******************************
-    echo ERLANG_HOME not set correctly.
-    echo ******************************
-    echo.
-    echo Please either set ERLANG_HOME to point to your Erlang installation or place the
-    echo RabbitMQ server distribution in the Erlang lib folder.
-    echo.
-    exit /B
-)
-
-"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden !RABBITMQ_CTL_ERL_ARGS! -sname rabbitmqctl!RANDOM!!TIME:~9! -s rabbit_control_main -nodename !RABBITMQ_NODENAME! -extra !STAR!
-
-endlocal
-endlocal
+@echo off\r
+REM  The contents of this file are subject to the Mozilla Public License\r
+REM  Version 1.1 (the "License"); you may not use this file except in\r
+REM  compliance with the License. You may obtain a copy of the License\r
+REM  at http://www.mozilla.org/MPL/\r
+REM\r
+REM  Software distributed under the License is distributed on an "AS IS"\r
+REM  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See\r
+REM  the License for the specific language governing rights and\r
+REM  limitations under the License.\r
+REM\r
+REM  The Original Code is RabbitMQ.\r
+REM\r
+REM  The Initial Developer of the Original Code is GoPivotal, Inc.\r
+REM  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.\r
+REM\r
+\r
+REM Scopes the variables to the current batch file\r
+setlocal\r
+\r
+rem Preserve values that might contain exclamation marks before\r
+rem enabling delayed expansion\r
+set TDP0=%~dp0\r
+set STAR=%*\r
+setlocal enabledelayedexpansion\r
+\r
+if not exist "!ERLANG_HOME!\bin\erl.exe" (\r
+    echo.\r
+    echo ******************************\r
+    echo ERLANG_HOME not set correctly.\r
+    echo ******************************\r
+    echo.\r
+    echo Please either set ERLANG_HOME to point to your Erlang installation or place the\r
+    echo RabbitMQ server distribution in the Erlang lib folder.\r
+    echo.\r
+    exit /B 1\r
+)\r
+\r
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>\r
+REM Non-empty defaults should be set in rabbitmq-env\r
+call "%TDP0%\rabbitmq-env.bat"\r
+\r
+"!ERLANG_HOME!\bin\erl.exe" ^\r
+-pa "!TDP0!..\ebin" ^\r
+-noinput ^\r
+-hidden ^\r
+!RABBITMQ_CTL_ERL_ARGS! ^\r
+-sasl errlog_type error ^\r
+-mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^\r
+-s rabbit_control_main ^\r
+-nodename !RABBITMQ_NODENAME! ^\r
+-extra !STAR!\r
+\r
+endlocal\r
+endlocal\r
index 0479ce6608df7f45ff0ed3e543c1a6456a118fc8..bab327eab6598fe87d6834fe70f1908f71a9ab4b 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 -module(app_utils).
 
 -export([load_applications/1, start_applications/1, start_applications/2,
          stop_applications/1, stop_applications/2, app_dependency_order/2,
-         wait_for_applications/1]).
+         app_dependencies/1]).
 
 -ifdef(use_specs).
 
@@ -28,8 +28,8 @@
 -spec stop_applications([atom()])                   -> 'ok'.
 -spec start_applications([atom()], error_handler()) -> 'ok'.
 -spec stop_applications([atom()], error_handler())  -> 'ok'.
--spec wait_for_applications([atom()])               -> 'ok'.
 -spec app_dependency_order([atom()], boolean())     -> [digraph:vertex()].
+-spec app_dependencies(atom())                      -> [atom()].
 
 -endif.
 
@@ -68,14 +68,10 @@ stop_applications(Apps, ErrorHandler) ->
                         ErrorHandler,
                         Apps).
 
-
-wait_for_applications(Apps) ->
-    [wait_for_application(App) || App <- Apps], ok.
-
 app_dependency_order(RootApps, StripUnreachable) ->
     {ok, G} = rabbit_misc:build_acyclic_graph(
-                fun (App, _Deps) -> [{App, App}] end,
-                fun (App,  Deps) -> [{Dep, App} || Dep <- Deps] end,
+                fun ({App, _Deps}) -> [{App, App}] end,
+                fun ({App,  Deps}) -> [{Dep, App} || Dep <- Deps] end,
                 [{App, app_dependencies(App)} ||
                     {App, _Desc, _Vsn} <- application:loaded_applications()]),
     try
@@ -92,13 +88,6 @@ app_dependency_order(RootApps, StripUnreachable) ->
 %%---------------------------------------------------------------------------
 %% Private API
 
-wait_for_application(Application) ->
-    case lists:keymember(Application, 1, rabbit_misc:which_applications()) of
-         true  -> ok;
-         false -> timer:sleep(1000),
-                  wait_for_application(Application)
-    end.
-
 load_applications(Worklist, Loaded) ->
     case queue:out(Worklist) of
         {empty, _WorkList} ->
index d30fa89678c2cf8cee142513d245ae030537fa57..0dafde6dc24fabc2271ed731910fc61d879ef859 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(background_gc).
index 3a640df194d0e67dca9af29ce09cd10f294ae68d..6b2417538848c23160a5638595e6dcc1df2226ea 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(credit_flow).
@@ -37,6 +37,8 @@
 
 -ifdef(use_specs).
 
+-export_type([bump_msg/0]).
+
 -opaque(bump_msg() :: {pid(), non_neg_integer()}).
 -type(credit_spec() :: {non_neg_integer(), non_neg_integer()}).
 
             put(Key, Expr)
         end).
 
+%% If current process was blocked by credit flow in the last
+%% STATE_CHANGE_INTERVAL milliseconds, state/0 will report it as "in
+%% flow".
+-define(STATE_CHANGE_INTERVAL, 1000000).
+
 %%----------------------------------------------------------------------------
 
 %% There are two "flows" here; of messages and of credit, going in
@@ -115,7 +122,7 @@ state() -> case blocked() of
                false -> case get(credit_blocked_at) of
                             undefined -> running;
                             B         -> Diff = timer:now_diff(erlang:now(), B),
-                                         case Diff < 5000000 of
+                                         case Diff < ?STATE_CHANGE_INTERVAL of
                                              true  -> flow;
                                              false -> running
                                          end
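
The new STATE_CHANGE_INTERVAL macro above replaces the hard-coded 5000000 in state/0: a process is reported as "in flow" if it was last blocked less than that many microseconds ago (timer:now_diff/2 returns microseconds, so 1000000 is one second). A minimal sketch of the rule, with a hypothetical function name:

    %% True if the process was blocked by credit flow within the last
    %% IntervalMicros microseconds.
    in_flow(BlockedAt, Now, IntervalMicros) ->
        timer:now_diff(Now, BlockedAt) < IntervalMicros.
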
index 378759a64b49015bdb26c7209a4611c8d9ed7dc5..4bf570cc27b9452f9c8bbce49916c4ed9440b3a2 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(delegate).
 
+%% delegate is an alternative way of doing remote calls. Compared to
+%% the rpc module, it reduces inter-node communication. For example,
+%% if a message is routed to 1,000 queues on node A and needs to be
+%% propagated to nodes B and C, it would be nice to avoid doing 2,000
+%% remote casts to queue processes.
+%%
+%% An important issue here is preserving order - we need to make sure
+%% that messages from a certain channel to a certain queue take a
+%% consistent route, to prevent them being reordered. In fact all
+%% AMQP-ish things (such as queue declaration results and basic.get)
+%% must take the same route as well, to ensure that clients see causal
+%% ordering correctly. Therefore we have a rather generic mechanism
+%% here rather than just a message-reflector. That's also why we pick
+%% the delegate process to use based on a hash of the source pid.
+%%
+%% When a function is invoked using delegate:invoke/2, delegate:call/2
+%% or delegate:cast/2 on a group of pids, the pids are first split
+%% into local and remote ones. Remote processes are then grouped by
+%% node. The function is then invoked locally and on every node (using
+%% gen_server2:multi/4) as many times as there are processes on that
+%% node, sequentially.
+%%
+%% Errors returned when executing functions on remote nodes are re-raised
+%% in the caller.
+%%
+%% RabbitMQ starts a pool of delegate processes on boot. The size of
+%% the pool is configurable, the aim is to make sure we don't have too
+%% few delegates and thus limit performance on many-CPU machines.
+
 -behaviour(gen_server2).
 
 -export([start_link/1, invoke_no_result/2, invoke/2,
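
The comment block added to delegate.erl above explains that a delegate process is chosen by hashing the source pid, so all traffic from a given channel to a given queue is serialised through the same route. A minimal sketch of that idea, assuming a pool of registered processes named delegate_0 .. delegate_N-1 (the naming scheme is illustrative, not taken from the diff):

    %% Deterministically pick a delegate for a source pid.
    pick_delegate(SourcePid, PoolSize) ->
        N = erlang:phash2(SourcePid, PoolSize),
        list_to_atom("delegate_" ++ integer_to_list(N)).
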
index cb5ef2b85b3706dac9281cef35f7b5d9482fd517..a285667d62b24956f31399bd77c05a0265c5c36e 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(delegate_sup).
index 89a34c9e2a42fa0e56c17d713e56236d50b4ed3d..0fef3b2d1f16f61d54326258486e6b5ff1ea6158 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% A dual-index tree.
@@ -41,7 +41,7 @@
 
 -export_type([?MODULE/0]).
 
--opaque(?MODULE()  :: {gb_tree(), gb_tree()}).
+-opaque(?MODULE()  :: {gb_trees:tree(), gb_trees:tree()}).
 
 -type(pk()         :: any()).
 -type(sk()         :: any()).
index 3a7a692c5ce11172c46ae67dfeaeb673b9cb70ec..8be19e5be322b919b688348cbe35232929139cc4 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(file_handle_cache).
@@ -30,9 +30,9 @@
 %% may happen, especially for writes.
 %% 3) Writes are all appends. You cannot write to the middle of a
 %% file, although you can truncate and then append if you want.
-%% 4) Although there is a write buffer, there is no read buffer. Feel
-%% free to use the read_ahead mode, but beware of the interaction
-%% between that buffer and the write buffer.
+%% 4) There are read and write buffers. Feel free to use the read_ahead
+%% mode, but beware of the interaction between that buffer and the write
+%% buffer.
 %%
 %% Some benefits
 %% 1) You do not have to remember to call sync before close
          copy/3, set_maximum_since_use/1, delete/1, clear/1]).
 -export([obtain/0, obtain/1, release/0, release/1, transfer/1, transfer/2,
          set_limit/1, get_limit/0, info_keys/0, with_handle/1, with_handle/2,
-         info/0, info/1]).
+         info/0, info/1, clear_read_cache/0]).
 -export([ulimit/0]).
 
 -export([start_link/0, start_link/2, init/1, handle_call/3, handle_cast/2,
 -define(CLIENT_ETS_TABLE, file_handle_cache_client).
 -define(ELDERS_ETS_TABLE, file_handle_cache_elders).
 
+-include("rabbit.hrl"). % For #amqqueue record definition.
+
 %%----------------------------------------------------------------------------
 
 -record(file,
           write_buffer_size,
           write_buffer_size_limit,
           write_buffer,
+          read_buffer,
+          read_buffer_pos,
+          read_buffer_rem,        %% Num of bytes from pos to end
+          read_buffer_size,       %% Next size of read buffer to use
+          read_buffer_size_limit, %% Max size of read buffer to use
+          read_buffer_usage,      %% Bytes we have read from it, for tuning
           at_eof,
           path,
           mode,
 -spec(register_callback/3 :: (atom(), atom(), [any()]) -> 'ok').
 -spec(open/3 ::
         (file:filename(), [any()],
-         [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')}])
+         [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} |
+          {'read_buffer', (non_neg_integer() | 'unbuffered')}])
         -> val_or_error(ref())).
 -spec(close/1 :: (ref()) -> ok_or_error()).
 -spec(read/2 :: (ref(), non_neg_integer()) ->
@@ -331,16 +340,55 @@ close(Ref) ->
 
 read(Ref, Count) ->
     with_flushed_handles(
-      [Ref],
+      [Ref], keep,
       fun ([#handle { is_read = false }]) ->
               {error, not_open_for_reading};
-          ([Handle = #handle { hdl = Hdl, offset = Offset }]) ->
-              case prim_file:read(Hdl, Count) of
-                  {ok, Data} = Obj -> Offset1 = Offset + iolist_size(Data),
-                                      {Obj,
-                                       [Handle #handle { offset = Offset1 }]};
-                  eof              -> {eof, [Handle #handle { at_eof = true }]};
-                  Error            -> {Error, [Handle]}
+          ([Handle = #handle{read_buffer       = Buf,
+                             read_buffer_pos   = BufPos,
+                             read_buffer_rem   = BufRem,
+                             read_buffer_usage = BufUsg,
+                             offset            = Offset}])
+            when BufRem >= Count ->
+              <<_:BufPos/binary, Res:Count/binary, _/binary>> = Buf,
+              {{ok, Res}, [Handle#handle{offset            = Offset + Count,
+                                         read_buffer_pos   = BufPos + Count,
+                                         read_buffer_rem   = BufRem - Count,
+                                         read_buffer_usage = BufUsg + Count }]};
+          ([Handle0]) ->
+              maybe_reduce_read_cache([Ref]),
+              Handle = #handle{read_buffer      = Buf,
+                               read_buffer_pos  = BufPos,
+                               read_buffer_rem  = BufRem,
+                               read_buffer_size = BufSz,
+                               hdl              = Hdl,
+                               offset           = Offset}
+                  = tune_read_buffer_limit(Handle0, Count),
+              WantedCount = Count - BufRem,
+              case prim_file_read(Hdl, lists:max([BufSz, WantedCount])) of
+                  {ok, Data} ->
+                      <<_:BufPos/binary, BufTl/binary>> = Buf,
+                      ReadCount = size(Data),
+                      case ReadCount < WantedCount of
+                          true ->
+                              OffSet1 = Offset + BufRem + ReadCount,
+                              {{ok, <<BufTl/binary, Data/binary>>},
+                               [reset_read_buffer(
+                                  Handle#handle{offset = OffSet1})]};
+                          false ->
+                              <<Hd:WantedCount/binary, _/binary>> = Data,
+                              OffSet1 = Offset + BufRem + WantedCount,
+                              BufRem1 = ReadCount - WantedCount,
+                              {{ok, <<BufTl/binary, Hd/binary>>},
+                               [Handle#handle{offset            = OffSet1,
+                                              read_buffer       = Data,
+                                              read_buffer_pos   = WantedCount,
+                                              read_buffer_rem   = BufRem1,
+                                              read_buffer_usage = WantedCount}]}
+                      end;
+                  eof ->
+                      {eof, [Handle #handle { at_eof = true }]};
+                  Error ->
+                      {Error, [reset_read_buffer(Handle)]}
               end
       end).
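
In the rewritten read/2 above, the first new clause serves the request entirely from the in-process read buffer whenever read_buffer_rem covers Count. A standalone sketch of that buffer-hit slice (function name hypothetical):

    %% Slice Count bytes out of the buffered binary, starting at BufPos,
    %% without touching the underlying file handle.
    buffer_hit(Buf, BufPos, Count) when byte_size(Buf) >= BufPos + Count ->
        <<_:BufPos/binary, Res:Count/binary, _/binary>> = Buf,
        {ok, Res}.
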
 
@@ -355,7 +403,7 @@ append(Ref, Data) ->
                                             write_buffer_size_limit = 0,
                                             at_eof = true } = Handle1} ->
                       Offset1 = Offset + iolist_size(Data),
-                      {prim_file:write(Hdl, Data),
+                      {prim_file_write(Hdl, Data),
                        [Handle1 #handle { is_dirty = true, offset = Offset1 }]};
                   {{ok, _Offset}, #handle { write_buffer = WriteBuffer,
                                             write_buffer_size = Size,
@@ -377,12 +425,12 @@ append(Ref, Data) ->
 
 sync(Ref) ->
     with_flushed_handles(
-      [Ref],
+      [Ref], keep,
       fun ([#handle { is_dirty = false, write_buffer = [] }]) ->
               ok;
           ([Handle = #handle { hdl = Hdl,
                                is_dirty = true, write_buffer = [] }]) ->
-              case prim_file:sync(Hdl) of
+              case prim_file_sync(Hdl) of
                   ok    -> {ok, [Handle #handle { is_dirty = false }]};
                   Error -> {Error, [Handle]}
               end
@@ -397,7 +445,7 @@ needs_sync(Ref) ->
 
 position(Ref, NewOffset) ->
     with_flushed_handles(
-      [Ref],
+      [Ref], keep,
       fun ([Handle]) -> {Result, Handle1} = maybe_seek(NewOffset, Handle),
                         {Result, [Handle1]}
       end).
@@ -465,8 +513,8 @@ clear(Ref) ->
       fun ([#handle { at_eof = true, write_buffer_size = 0, offset = 0 }]) ->
               ok;
           ([Handle]) ->
-              case maybe_seek(bof, Handle #handle { write_buffer = [],
-                                                    write_buffer_size = 0 }) of
+              case maybe_seek(bof, Handle#handle{write_buffer      = [],
+                                                 write_buffer_size = 0}) of
                   {{ok, 0}, Handle1 = #handle { hdl = Hdl }} ->
                       case prim_file:truncate(Hdl) of
                           ok    -> {ok, [Handle1 #handle { at_eof = true }]};
@@ -535,10 +583,61 @@ info_keys() -> ?INFO_KEYS.
 info() -> info(?INFO_KEYS).
 info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity).
 
+clear_read_cache() ->
+    gen_server2:cast(?SERVER, clear_read_cache),
+    clear_vhost_read_cache(rabbit_vhost:list()).
+
+clear_vhost_read_cache([]) ->
+    ok;
+clear_vhost_read_cache([VHost | Rest]) ->
+    clear_queue_read_cache(rabbit_amqqueue:list(VHost)),
+    clear_vhost_read_cache(Rest).
+
+clear_queue_read_cache([]) ->
+    ok;
+clear_queue_read_cache([#amqqueue{pid = MPid, slave_pids = SPids} | Rest]) ->
+    %% Limit the action to the current node.
+    Pids = [P || P <- [MPid | SPids], node(P) =:= node()],
+    %% This function is executed in the context of the backing queue
+    %% process because the read buffer is stored in the process
+    %% dictionary.
+    Fun = fun(_, State) ->
+                  clear_process_read_cache(),
+                  State
+          end,
+    [rabbit_amqqueue:run_backing_queue(Pid, rabbit_variable_queue, Fun)
+     || Pid <- Pids],
+    clear_queue_read_cache(Rest).
+
+clear_process_read_cache() ->
+    [
+     begin
+         Handle1 = reset_read_buffer(Handle),
+         put({Ref, fhc_handle}, Handle1)
+     end ||
+        {{Ref, fhc_handle}, Handle} <- get(),
+        size(Handle#handle.read_buffer) > 0
+    ].
+
 %%----------------------------------------------------------------------------
 %% Internal functions
 %%----------------------------------------------------------------------------
 
+prim_file_read(Hdl, Size) ->
+    file_handle_cache_stats:update(
+      io_read, Size, fun() -> prim_file:read(Hdl, Size) end).
+
+prim_file_write(Hdl, Bytes) ->
+    file_handle_cache_stats:update(
+      io_write, iolist_size(Bytes), fun() -> prim_file:write(Hdl, Bytes) end).
+
+prim_file_sync(Hdl) ->
+    file_handle_cache_stats:update(io_sync, fun() -> prim_file:sync(Hdl) end).
+
+prim_file_position(Hdl, NewOffset) ->
+    file_handle_cache_stats:update(
+      io_seek, fun() -> prim_file:position(Hdl, NewOffset) end).
+
 is_reader(Mode) -> lists:member(read, Mode).
 
 is_writer(Mode) -> lists:member(write, Mode).
@@ -550,8 +649,15 @@ append_to_write(Mode) ->
     end.
 
 with_handles(Refs, Fun) ->
+    with_handles(Refs, reset, Fun).
+
+with_handles(Refs, ReadBuffer, Fun) ->
     case get_or_reopen([{Ref, reopen} || Ref <- Refs]) of
-        {ok, Handles} ->
+        {ok, Handles0} ->
+            Handles = case ReadBuffer of
+                          reset -> [reset_read_buffer(H) || H <- Handles0];
+                          keep  -> Handles0
+                      end,
             case Fun(Handles) of
                 {Result, Handles1} when is_list(Handles1) ->
                     lists:zipwith(fun put_handle/2, Refs, Handles1),
@@ -564,8 +670,11 @@ with_handles(Refs, Fun) ->
     end.
 
 with_flushed_handles(Refs, Fun) ->
+    with_flushed_handles(Refs, reset, Fun).
+
+with_flushed_handles(Refs, ReadBuffer, Fun) ->
     with_handles(
-      Refs,
+      Refs, ReadBuffer,
       fun (Handles) ->
               case lists:foldl(
                      fun (Handle, {ok, HandlesAcc}) ->
@@ -611,20 +720,23 @@ reopen([], Tree, RefHdls) ->
     {ok, lists:reverse(RefHdls)};
 reopen([{Ref, NewOrReopen, Handle = #handle { hdl          = closed,
                                               path         = Path,
-                                              mode         = Mode,
+                                              mode         = Mode0,
                                               offset       = Offset,
                                               last_used_at = undefined }} |
         RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) ->
-    case prim_file:open(Path, case NewOrReopen of
-                                  new    -> Mode;
-                                  reopen -> [read | Mode]
-                              end) of
+    Mode = case NewOrReopen of
+               new    -> Mode0;
+               reopen -> file_handle_cache_stats:update(io_reopen),
+                         [read | Mode0]
+           end,
+    case prim_file:open(Path, Mode) of
         {ok, Hdl} ->
             Now = now(),
             {{ok, _Offset}, Handle1} =
-                maybe_seek(Offset, Handle #handle { hdl          = Hdl,
-                                                    offset       = 0,
-                                                    last_used_at = Now }),
+                maybe_seek(Offset, reset_read_buffer(
+                                     Handle#handle{hdl              = Hdl,
+                                                   offset           = 0,
+                                                   last_used_at     = Now})),
             put({Ref, fhc_handle}, Handle1),
             reopen(RefNewOrReopenHdls, gb_trees:insert(Now, Ref, Tree),
                    [{Ref, Handle1} | RefHdls]);
@@ -709,6 +821,11 @@ new_closed_handle(Path, Mode, Options) ->
             infinity             -> infinity;
             N when is_integer(N) -> N
         end,
+    ReadBufferSize =
+        case proplists:get_value(read_buffer, Options, unbuffered) of
+            unbuffered             -> 0;
+            N2 when is_integer(N2) -> N2
+        end,
     Ref = make_ref(),
     put({Ref, fhc_handle}, #handle { hdl                     = closed,
                                      offset                  = 0,
@@ -716,6 +833,12 @@ new_closed_handle(Path, Mode, Options) ->
                                      write_buffer_size       = 0,
                                      write_buffer_size_limit = WriteBufferSize,
                                      write_buffer            = [],
+                                     read_buffer             = <<>>,
+                                     read_buffer_pos         = 0,
+                                     read_buffer_rem         = 0,
+                                     read_buffer_size        = ReadBufferSize,
+                                     read_buffer_size_limit  = ReadBufferSize,
+                                     read_buffer_usage       = 0,
                                      at_eof                  = false,
                                      path                    = Path,
                                      mode                    = Mode,
@@ -742,7 +865,7 @@ soft_close(Handle) ->
                        is_dirty    = IsDirty,
                        last_used_at = Then } = Handle1 } ->
             ok = case IsDirty of
-                     true  -> prim_file:sync(Hdl);
+                     true  -> prim_file_sync(Hdl);
                      false -> ok
                  end,
             ok = prim_file:close(Hdl),
@@ -776,17 +899,31 @@ hard_close(Handle) ->
             Result
     end.
 
-maybe_seek(NewOffset, Handle = #handle { hdl = Hdl, offset = Offset,
-                                         at_eof = AtEoF }) ->
-    {AtEoF1, NeedsSeek} = needs_seek(AtEoF, Offset, NewOffset),
-    case (case NeedsSeek of
-              true  -> prim_file:position(Hdl, NewOffset);
-              false -> {ok, Offset}
-          end) of
-        {ok, Offset1} = Result ->
-            {Result, Handle #handle { offset = Offset1, at_eof = AtEoF1 }};
-        {error, _} = Error ->
-            {Error, Handle}
+maybe_seek(New, Handle = #handle{hdl              = Hdl,
+                                 offset           = Old,
+                                 read_buffer_pos  = BufPos,
+                                 read_buffer_rem  = BufRem,
+                                 at_eof           = AtEoF}) ->
+    {AtEoF1, NeedsSeek} = needs_seek(AtEoF, Old, New),
+    case NeedsSeek of
+        true when is_number(New) andalso
+                  ((New >= Old andalso New =< BufRem + Old)
+                   orelse (New < Old andalso Old - New =< BufPos)) ->
+            Diff = New - Old,
+            {{ok, New}, Handle#handle{offset          = New,
+                                      at_eof          = AtEoF1,
+                                      read_buffer_pos = BufPos + Diff,
+                                      read_buffer_rem = BufRem - Diff}};
+        true ->
+            case prim_file_position(Hdl, New) of
+                {ok, Offset1} = Result ->
+                    {Result, reset_read_buffer(Handle#handle{offset = Offset1,
+                                                             at_eof = AtEoF1})};
+                {error, _} = Error ->
+                    {Error, Handle}
+            end;
+        false ->
+            {{ok, Old}, Handle}
     end.
 
 needs_seek( AtEoF, _CurOffset,  cur     ) -> {AtEoF, false};
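
The reworked maybe_seek above skips the real prim_file_position/2 call when the requested offset still falls inside the bytes held in the read buffer. A minimal restatement of that in-buffer test (function name hypothetical):

    %% A forward seek is free while it stays within the unread part of the
    %% buffer; a backward seek is free while it stays within the part
    %% already read out of it.
    can_seek_in_buffer(New, Old, BufPos, BufRem) when is_number(New) ->
        (New >= Old andalso New =< Old + BufRem)
            orelse (New < Old andalso Old - New =< BufPos).
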
@@ -817,7 +954,7 @@ write_buffer(Handle = #handle { hdl = Hdl, offset = Offset,
                                 write_buffer = WriteBuffer,
                                 write_buffer_size = DataSize,
                                 at_eof = true }) ->
-    case prim_file:write(Hdl, lists:reverse(WriteBuffer)) of
+    case prim_file_write(Hdl, lists:reverse(WriteBuffer)) of
         ok ->
             Offset1 = Offset + DataSize,
             {ok, Handle #handle { offset = Offset1, is_dirty = true,
@@ -826,6 +963,75 @@ write_buffer(Handle = #handle { hdl = Hdl, offset = Offset,
             {Error, Handle}
     end.
 
+reset_read_buffer(Handle) ->
+    Handle#handle{read_buffer     = <<>>,
+                  read_buffer_pos = 0,
+                  read_buffer_rem = 0}.
+
+%% We come into this function whenever there's been a miss while
+%% reading from the buffer - but note that when we first start with a
+%% new handle the usage will be 0.  Therefore in that case don't take
+%% it as meaning the buffer was useless, we just haven't done anything
+%% yet!
+tune_read_buffer_limit(Handle = #handle{read_buffer_usage = 0}, _Count) ->
+    Handle;
+%% In this head we have been using the buffer but now tried to read
+%% outside it. So how did we do? If we used less than the size of the
+%% buffer, make the new buffer the size of what we used before, but
+%% add one byte (so that next time we can distinguish between getting
+%% the buffer size exactly right and actually wanting more). If we
+%% read 100% of what we had, then double it for next time, up to the
+%% limit that was set when we were created.
+tune_read_buffer_limit(Handle = #handle{read_buffer            = Buf,
+                                        read_buffer_usage      = Usg,
+                                        read_buffer_size       = Sz,
+                                        read_buffer_size_limit = Lim}, Count) ->
+    %% If the buffer is <<>> then we are in the first read after a
+    %% reset, the read_buffer_usage is the total usage from before the
+    %% reset. But otherwise we are in a read which read off the end of
+    %% the buffer, so really the size of this read should be included
+    %% in the usage.
+    TotalUsg = case Buf of
+                   <<>> -> Usg;
+                   _    -> Usg + Count
+               end,
+    Handle#handle{read_buffer_usage = 0,
+                  read_buffer_size  = erlang:min(case TotalUsg < Sz of
+                                                     true  -> Usg + 1;
+                                                     false -> Usg * 2
+                                                 end, Lim)}.
+
+maybe_reduce_read_cache(SparedRefs) ->
+    case rabbit_memory_monitor:memory_use(bytes) of
+        {_, infinity}                             -> ok;
+        {MemUse, MemLimit} when MemUse < MemLimit -> ok;
+        {MemUse, MemLimit}                        -> reduce_read_cache(
+                                                       (MemUse - MemLimit) * 2,
+                                                       SparedRefs)
+    end.
+
+reduce_read_cache(MemToFree, SparedRefs) ->
+    Handles = lists:sort(
+      fun({_, H1}, {_, H2}) -> H1 < H2 end,
+      [{R, H} || {{R, fhc_handle}, H} <- get(),
+                 not lists:member(R, SparedRefs)
+                 andalso size(H#handle.read_buffer) > 0]),
+    FreedMem = lists:foldl(
+      fun
+          (_, Freed) when Freed >= MemToFree ->
+              Freed;
+          ({Ref, #handle{read_buffer = Buf} = Handle}, Freed) ->
+              Handle1 = reset_read_buffer(Handle),
+              put({Ref, fhc_handle}, Handle1),
+              Freed + size(Buf)
+      end, 0, Handles),
+    if
+        FreedMem < MemToFree andalso SparedRefs =/= [] ->
+            reduce_read_cache(MemToFree - FreedMem, []);
+        true ->
+            ok
+    end.
+
 infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
 
 i(total_limit,   #fhc_state{limit               = Limit}) -> Limit;
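
The tune_read_buffer_limit/2 clauses added above resize the next read buffer according to how much of the previous one was actually used, capped by the limit given when the handle was opened. A hypothetical one-function restatement of the policy described in its comment:

    %% Partly used: shrink towards the used amount plus one byte (so an
    %% exact fit can be told apart from wanting more). Fully used: double,
    %% never exceeding the configured limit.
    next_read_buffer_size(Used, Current, Limit) when Used < Current ->
        erlang:min(Used + 1, Limit);
    next_read_buffer_size(Used, _Current, Limit) ->
        erlang:min(Used * 2, Limit).
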
@@ -843,6 +1049,7 @@ used(#fhc_state{open_count          = C1,
 %%----------------------------------------------------------------------------
 
 init([AlarmSet, AlarmClear]) ->
+    file_handle_cache_stats:init(),
     Limit = case application:get_env(file_handles_high_watermark) of
                 {ok, Watermark} when (is_integer(Watermark) andalso
                                       Watermark > 0) ->
@@ -978,7 +1185,11 @@ handle_cast({transfer, N, FromPid, ToPid}, State) ->
     {noreply, process_pending(
                 update_counts({obtain, socket}, ToPid, +N,
                               update_counts({obtain, socket}, FromPid, -N,
-                                            State)))}.
+                                            State)))};
+
+handle_cast(clear_read_cache, State) ->
+    clear_process_read_cache(),
+    {noreply, State}.
 
 handle_info(check_counts, State) ->
     {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })};
diff --git a/rabbitmq-server/src/file_handle_cache_stats.erl b/rabbitmq-server/src/file_handle_cache_stats.erl
new file mode 100644 (file)
index 0000000..5f6926b
--- /dev/null
@@ -0,0 +1,67 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(file_handle_cache_stats).
+
+%% stats about read / write operations that go through the fhc.
+
+-export([init/0, update/3, update/2, update/1, get/0]).
+
+-define(TABLE, ?MODULE).
+
+-define(COUNT,
+        [io_reopen, mnesia_ram_tx, mnesia_disk_tx,
+         msg_store_read, msg_store_write,
+         queue_index_journal_write, queue_index_write, queue_index_read]).
+-define(COUNT_TIME, [io_sync, io_seek]).
+-define(COUNT_TIME_BYTES, [io_read, io_write]).
+
+init() ->
+    ets:new(?TABLE, [public, named_table]),
+    [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op      <- ?COUNT_TIME_BYTES,
+                                               Counter <- [count, bytes, time]],
+    [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op      <- ?COUNT_TIME,
+                                               Counter <- [count, time]],
+    [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op      <- ?COUNT,
+                                               Counter <- [count]].
+
+update(Op, Bytes, Thunk) ->
+    {Time, Res} = timer_tc(Thunk),
+    ets:update_counter(?TABLE, {Op, count}, 1),
+    ets:update_counter(?TABLE, {Op, bytes}, Bytes),
+    ets:update_counter(?TABLE, {Op, time}, Time),
+    Res.
+
+update(Op, Thunk) ->
+    {Time, Res} = timer_tc(Thunk),
+    ets:update_counter(?TABLE, {Op, count}, 1),
+    ets:update_counter(?TABLE, {Op, time}, Time),
+    Res.
+
+update(Op) ->
+    ets:update_counter(?TABLE, {Op, count}, 1),
+    ok.
+
+get() ->
+    lists:sort(ets:tab2list(?TABLE)).
+
+%% TODO timer:tc/1 was introduced in R14B03; use that function once we
+%% require that version.
+timer_tc(Thunk) ->
+    T1 = os:timestamp(),
+    Res = Thunk(),
+    T2 = os:timestamp(),
+    {timer:now_diff(T2, T1), Res}.
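
The new file_handle_cache_stats module above keeps per-operation counters in a named public ETS table: update/3 times a thunk and bumps the count, bytes and elapsed-time counters, while get/0 returns the sorted table contents. A small usage sketch built only from the functions defined in this file (the fake read thunk is illustrative):

    demo() ->
        file_handle_cache_stats:init(),
        {ok, _Data} = file_handle_cache_stats:update(
                        io_read, 512,
                        fun() -> {ok, <<0:512/unit:8>>} end),
        %% e.g. [{{io_read,bytes},512}, {{io_read,count},1}, ...]
        file_handle_cache_stats:get().
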
index 8bce170754acaf2b8fbd1cdb2c181126f34dfb5e..89bd46ca378d26df032055b98e3804bf1788bb21 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(gatherer).
 
+%% Gatherer is a queue which has producer and consumer processes. Before producers
+%% push items to the queue using gatherer:in/2 they need to declare their intent
+%% to do so with gatherer:fork/1. When a publisher's work is done, it states so
+%% using gatherer:finish/1.
+%%
+%% Consumers pop messages off queues with gatherer:out/1. If a queue is empty
+%% and there are producers that haven't finished working, the caller is blocked
+%% until an item is available. If there are no active producers, gatherer:out/1
+%% immediately returns 'empty'.
+%%
+%% This module is primarily used to collect results from asynchronous tasks
+%% running in a worker pool, e.g. when recovering bindings or rebuilding
+%% message store indices.
+
 -behaviour(gen_server2).
 
 -export([start_link/0, stop/1, fork/1, finish/1, in/2, sync_in/2, out/1]).
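
The comment block added to gatherer.erl above describes the fork/finish/in/out protocol. A minimal usage sketch, assuming out/1 returns {value, V} for queued items and empty once all producers have finished (the payload atom and the collect/1 helper are illustrative):

    demo() ->
        {ok, G} = gatherer:start_link(),
        gatherer:fork(G),
        spawn_link(fun () ->
                           gatherer:in(G, first_result),
                           gatherer:finish(G)
                   end),
        collect(G).

    collect(G) ->
        case gatherer:out(G) of
            empty        -> gatherer:stop(G);
            {value, Val} -> io:format("got ~p~n", [Val]),
                            collect(G)
        end.
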
index ee82bcb31127e71b4c5133f9d2629316fc74cc40..fd0e6553b597a158155cc23b467d8a3cdc34533c 100644 (file)
@@ -69,7 +69,9 @@
 %% which will be passed into any of the callback functions in the new
 %% module. Note there is no form also encompassing a reply, thus if
 %% you wish to reply in handle_call/3 and change the callback module,
-%% you need to use gen_server2:reply/2 to issue the reply manually.
+%% you need to use gen_server2:reply/2 to issue the reply
+%% manually. The init function can similarly return a 5th argument,
+%% Module, in order to dynamically decide the callback module on init.
 %%
 %% 8) The callback module can optionally implement
 %% format_message_queue/2 which is the equivalent of format_status/2
 %%%     ==> {ok, State}
 %%%         {ok, State, Timeout}
 %%%         {ok, State, Timeout, Backoff}
+%%%         {ok, State, Timeout, Backoff, Module}
 %%%         ignore
 %%%         {stop, Reason}
 %%%
     {ok, State :: term(), timeout() | hibernate} |
     {ok, State :: term(), timeout() | hibernate,
      {backoff, millis(), millis(), millis()}} |
+    {ok, State :: term(), timeout() | hibernate,
+     {backoff, millis(), millis(), millis()}, atom()} |
     ignore |
     {stop, Reason :: term()}.
 -callback handle_call(Request :: term(), From :: {pid(), Tag :: term()},
@@ -568,6 +573,14 @@ init_it(Starter, Parent, Name0, Mod, Args, Options) ->
             loop(GS2State #gs2_state { state         = State,
                                        time          = Timeout,
                                        timeout_state = Backoff1 });
+        {ok, State, Timeout, Backoff = {backoff, _, _, _}, Mod1} ->
+            Backoff1 = extend_backoff(Backoff),
+            proc_lib:init_ack(Starter, {ok, self()}),
+            loop(find_prioritisers(
+                  GS2State #gs2_state { mod           = Mod1,
+                                        state         = State,
+                                        time          = Timeout,
+                                        timeout_state = Backoff1 }));
         {stop, Reason} ->
             %% For consistency, we must make sure that the
             %% registered name (if any) is unregistered before
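
The gen_server2 changes above add a fifth element, Module, to the backoff form of the init/1 return so a server can pick its callback module dynamically at start-up. A minimal sketch of such an init/1 (the state term, the backoff numbers and the target module name are placeholders):

    init([Args]) ->
        %% Shape matches the new 5-element return documented above.
        Backoff = {backoff, 1000, 1000, 10000},
        {ok, Args, hibernate, Backoff, my_real_callback_module}.
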
index 2235da33ae4325db4e3ab639913a97690da14477..95dc84e41bbd4a58d354cd44c8ddb404106d8feb 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(gm).
@@ -62,7 +62,7 @@
 %%
 %% leave/1
 %% Provide the Pid. Removes the Pid from the group. The callback
-%% terminate/2 function will be called.
+%% handle_terminate/2 function will be called.
 %%
 %% broadcast/2
 %% Provide the Pid and a Message. The message will be sent to all
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
          code_change/3, prioritise_info/3]).
 
+%% For INSTR_MOD callbacks
+-export([call/3, cast/2, monitor/1, demonitor/1]).
+
 -ifndef(use_specs).
 -export([behaviour_info/1]).
 -endif.
 
 %% Called on gm member termination as per rules in gen_server, with
 %% the Args provided in start_link plus the termination Reason.
--callback terminate(Args :: term(), Reason :: term()) ->
+-callback handle_terminate(Args :: term(), Reason :: term()) ->
     ok | term().
 
 -else.
 
 behaviour_info(callbacks) ->
-    [{joined, 2}, {members_changed, 3}, {handle_msg, 3}, {terminate, 2}];
+    [{joined, 2}, {members_changed, 3}, {handle_msg, 3}, {handle_terminate, 2}];
 behaviour_info(_Other) ->
     undefined.
 
@@ -616,6 +619,16 @@ handle_call({add_on_right, NewMember}, _From,
                                             members_state = MembersState1 }),
     handle_callback_result({Result, {ok, Group}, State1}).
 
+%% add_on_right causes a catchup to be sent immediately from the left,
+%% so we can never see this from the left neighbour. However, it's
+%% possible for the right neighbour to send us a check_neighbours
+%% immediately before that. We can't possibly handle it, but if we're
+%% in this state we know a catchup is coming imminently anyway. So
+%% just ignore it.
+handle_cast({?TAG, _ReqVer, check_neighbours},
+            State = #state { members_state = undefined }) ->
+    noreply(State);
+
 handle_cast({?TAG, ReqVer, Msg},
             State = #state { view          = View,
                              members_state = MembersState,
@@ -707,6 +720,14 @@ handle_info({'DOWN', MRef, process, _Pid, Reason},
         {_, {shutdown, ring_shutdown}} ->
             noreply(State);
         _ ->
+            %% In the event of a partial partition we could see another member
+            %% go down and then remove them from Mnesia. While they can
+            %% recover from this they'd have to restart the queue - not
+            %% ideal. So let's sleep here briefly just in case this was caused
+            %% by a partial partition; in which case by the time we record the
+            %% member death in Mnesia we will probably be in a full
+            %% partition and will not be assassinating another member.
+            timer:sleep(100),
             View1 = group_to_view(record_dead_member_in_group(
                                     Member, GroupName, TxnFun)),
             handle_callback_result(
@@ -724,7 +745,7 @@ handle_info({'DOWN', MRef, process, _Pid, Reason},
 terminate(Reason, State = #state { module        = Module,
                                    callback_args = Args }) ->
     flush_broadcast_buffer(State),
-    Module:terminate(Args, Reason).
+    Module:handle_terminate(Args, Reason).
 
 
 code_change(_OldVsn, State, _Extra) ->
@@ -1177,8 +1198,8 @@ can_erase_view_member(Self, Self, _LA, _LP) -> false;
 can_erase_view_member(_Self, _Id,   N,   N) -> true;
 can_erase_view_member(_Self, _Id, _LA, _LP) -> false.
 
-neighbour_cast(N, Msg) -> gen_server2:cast(get_pid(N), Msg).
-neighbour_call(N, Msg) -> gen_server2:call(get_pid(N), Msg, infinity).
+neighbour_cast(N, Msg) -> ?INSTR_MOD:cast(get_pid(N), Msg).
+neighbour_call(N, Msg) -> ?INSTR_MOD:call(get_pid(N), Msg, infinity).
 
 %% ---------------------------------------------------------------------------
 %% View monitoring and maintanence
@@ -1192,7 +1213,7 @@ ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) ->
 ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) ->
     {RealNeighbour, MRef};
 ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) ->
-    true = erlang:demonitor(MRef),
+    true = ?INSTR_MOD:demonitor(MRef),
     Msg = {?TAG, Ver, check_neighbours},
     ok = neighbour_cast(RealNeighbour, Msg),
     ok = case Neighbour of
@@ -1202,7 +1223,7 @@ ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) ->
     {Neighbour, maybe_monitor(Neighbour, Self)}.
 
 maybe_monitor( Self,  Self) -> undefined;
-maybe_monitor(Other, _Self) -> erlang:monitor(process, get_pid(Other)).
+maybe_monitor(Other, _Self) -> ?INSTR_MOD:monitor(get_pid(Other)).
 
 check_neighbours(State = #state { self             = Self,
                                   left             = Left,
@@ -1461,3 +1482,12 @@ last_pub(  [], LP) -> LP;
 last_pub(List, LP) -> {PubNum, _Msg} = lists:last(List),
                       true = PubNum > LP, %% ASSERTION
                       PubNum.
+
+%% ---------------------------------------------------------------------------
+
+%% Uninstrumented versions
+
+call(Pid, Msg, Timeout) -> gen_server2:call(Pid, Msg, Timeout).
+cast(Pid, Msg)          -> gen_server2:cast(Pid, Msg).
+monitor(Pid)            -> erlang:monitor(process, Pid).
+demonitor(MRef)         -> erlang:demonitor(MRef).
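
The gm changes above rename the terminate/2 behaviour callback to handle_terminate/2 (see the updated behaviour_info/1). A minimal callback module written against the renamed callback; the module name and what each callback sends to Owner are illustrative:

    -module(gm_echo_sketch).
    -behaviour(gm).

    -export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).

    joined(Owner, Members)          -> Owner ! {joined, length(Members)}, ok.
    members_changed(_Owner, _B, _D) -> ok.
    handle_msg(Owner, _From, Msg)   -> Owner ! {gm_msg, Msg}, ok.
    handle_terminate(Owner, Reason) -> Owner ! {left, Reason}, ok.
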
diff --git a/rabbitmq-server/src/gm_soak_test.erl b/rabbitmq-server/src/gm_soak_test.erl
deleted file mode 100644 (file)
index c9a2552..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(gm_soak_test).
-
--export([test/0]).
--export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
-%% ---------------------------------------------------------------------------
-%% Soak test
-%% ---------------------------------------------------------------------------
-
-get_state() ->
-    get(state).
-
-with_state(Fun) ->
-    put(state, Fun(get_state())).
-
-inc() ->
-    case 1 + get(count) of
-        100000 -> Now = now(),
-                  Start = put(ts, Now),
-                  Diff = timer:now_diff(Now, Start),
-                  Rate = 100000 / (Diff / 1000000),
-                  io:format("~p seeing ~p msgs/sec~n", [self(), Rate]),
-                  put(count, 0);
-        N      -> put(count, N)
-    end.
-
-joined([], Members) ->
-    io:format("Joined ~p (~p members)~n", [self(), length(Members)]),
-    put(state, dict:from_list([{Member, empty} || Member <- Members])),
-    put(count, 0),
-    put(ts, now()),
-    ok.
-
-members_changed([], Births, Deaths) ->
-    with_state(
-      fun (State) ->
-              State1 =
-                  lists:foldl(
-                    fun (Born, StateN) ->
-                            false = dict:is_key(Born, StateN),
-                            dict:store(Born, empty, StateN)
-                    end, State, Births),
-              lists:foldl(
-                fun (Died, StateN) ->
-                        true = dict:is_key(Died, StateN),
-                        dict:store(Died, died, StateN)
-                end, State1, Deaths)
-      end),
-    ok.
-
-handle_msg([], From, {test_msg, Num}) ->
-    inc(),
-    with_state(
-      fun (State) ->
-              ok = case dict:find(From, State) of
-                       {ok, died} ->
-                           exit({{from, From},
-                                 {received_posthumous_delivery, Num}});
-                       {ok, empty} -> ok;
-                       {ok, Num}   -> ok;
-                       {ok, Num1} when Num < Num1 ->
-                           exit({{from, From},
-                                 {duplicate_delivery_of, Num},
-                                 {expecting, Num1}});
-                       {ok, Num1} ->
-                           exit({{from, From},
-                                 {received_early, Num},
-                                 {expecting, Num1}});
-                       error ->
-                           exit({{from, From},
-                                 {received_premature_delivery, Num}})
-                   end,
-              dict:store(From, Num + 1, State)
-      end),
-    ok.
-
-terminate([], Reason) ->
-    io:format("Left ~p (~p)~n", [self(), Reason]),
-    ok.
-
-spawn_member() ->
-    spawn_link(
-      fun () ->
-              {MegaSecs, Secs, MicroSecs} = now(),
-              random:seed(MegaSecs, Secs, MicroSecs),
-              %% start up delay of no more than 10 seconds
-              timer:sleep(random:uniform(10000)),
-              {ok, Pid} = gm:start_link(
-                            ?MODULE, ?MODULE, [],
-                            fun rabbit_misc:execute_mnesia_transaction/1),
-              Start = random:uniform(10000),
-              send_loop(Pid, Start, Start + random:uniform(10000)),
-              gm:leave(Pid),
-              spawn_more()
-      end).
-
-spawn_more() ->
-    [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))].
-
-send_loop(_Pid, Target, Target) ->
-    ok;
-send_loop(Pid, Count, Target) when Target > Count ->
-    case random:uniform(3) of
-        3 -> gm:confirmed_broadcast(Pid, {test_msg, Count});
-        _ -> gm:broadcast(Pid, {test_msg, Count})
-    end,
-    timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms
-    send_loop(Pid, Count + 1, Target).
-
-test() ->
-    ok = gm:create_tables(),
-    spawn_member(),
-    spawn_member().
diff --git a/rabbitmq-server/src/gm_speed_test.erl b/rabbitmq-server/src/gm_speed_test.erl
deleted file mode 100644 (file)
index 41be6dd..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(gm_speed_test).
-
--export([test/3]).
--export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
--export([wile_e_coyote/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
-%% callbacks
-
-joined(Owner, _Members) ->
-    Owner ! joined,
-    ok.
-
-members_changed(_Owner, _Births, _Deaths) ->
-    ok.
-
-handle_msg(Owner, _From, ping) ->
-    Owner ! ping,
-    ok.
-
-terminate(Owner, _Reason) ->
-    Owner ! terminated,
-    ok.
-
-%% other
-
-wile_e_coyote(Time, WriteUnit) ->
-    {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
-                              fun rabbit_misc:execute_mnesia_transaction/1),
-    receive joined -> ok end,
-    timer:sleep(1000), %% wait for all to join
-    timer:send_after(Time, stop),
-    Start = now(),
-    {Sent, Received} = loop(Pid, WriteUnit, 0, 0),
-    End = now(),
-    ok = gm:leave(Pid),
-    receive terminated -> ok end,
-    Elapsed = timer:now_diff(End, Start) / 1000000,
-    io:format("Sending rate:   ~p msgs/sec~nReceiving rate: ~p msgs/sec~n~n",
-              [Sent/Elapsed, Received/Elapsed]),
-    ok.
-
-loop(Pid, WriteUnit, Sent, Received) ->
-    case read(Received) of
-        {stop, Received1} -> {Sent, Received1};
-        {ok,   Received1} -> ok = write(Pid, WriteUnit),
-                             loop(Pid, WriteUnit, Sent + WriteUnit, Received1)
-    end.
-
-read(Count) ->
-    receive
-        ping -> read(Count + 1);
-        stop -> {stop, Count}
-    after 5 ->
-            {ok, Count}
-    end.
-
-write(_Pid, 0) -> ok;
-write(Pid,  N) -> ok = gm:broadcast(Pid, ping),
-                  write(Pid, N - 1).
-
-test(Time, WriteUnit, Nodes) ->
-    ok = gm:create_tables(),
-    [spawn(Node, ?MODULE, wile_e_coyote, [Time, WriteUnit]) || Node <- Nodes].
diff --git a/rabbitmq-server/src/gm_tests.erl b/rabbitmq-server/src/gm_tests.erl
deleted file mode 100644 (file)
index cae2164..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(gm_tests).
-
--export([test_join_leave/0,
-         test_broadcast/0,
-         test_confirmed_broadcast/0,
-         test_member_death/0,
-         test_receive_in_order/0,
-         all_tests/0]).
--export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
--define(RECEIVE_OR_THROW(Body, Bool, Error),
-        receive Body ->
-                true = Bool,
-                passed
-        after 1000 ->
-                throw(Error)
-        end).
-
-joined(Pid, Members) ->
-    Pid ! {joined, self(), Members},
-    ok.
-
-members_changed(Pid, Births, Deaths) ->
-    Pid ! {members_changed, self(), Births, Deaths},
-    ok.
-
-handle_msg(Pid, From, Msg) ->
-    Pid ! {msg, self(), From, Msg},
-    ok.
-
-terminate(Pid, Reason) ->
-    Pid ! {termination, self(), Reason},
-    ok.
-
-%% ---------------------------------------------------------------------------
-%% Functional tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
-    passed = test_join_leave(),
-    passed = test_broadcast(),
-    passed = test_confirmed_broadcast(),
-    passed = test_member_death(),
-    passed = test_receive_in_order(),
-    passed.
-
-test_join_leave() ->
-    with_two_members(fun (_Pid, _Pid2) -> passed end).
-
-test_broadcast() ->
-    test_broadcast(fun gm:broadcast/2).
-
-test_confirmed_broadcast() ->
-    test_broadcast(fun gm:confirmed_broadcast/2).
-
-test_member_death() ->
-    with_two_members(
-      fun (Pid, Pid2) ->
-              {ok, Pid3} = gm:start_link(
-                             ?MODULE, ?MODULE, self(),
-                             fun rabbit_misc:execute_mnesia_transaction/1),
-              passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
-                                      timeout_joining_gm_group_3),
-              passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
-              passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
-
-              unlink(Pid3),
-              exit(Pid3, kill),
-
-              %% Have to do some broadcasts to ensure that all members
-              %% find out about the death.
-              passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))(
-                         Pid, Pid2),
-
-              passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
-              passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
-
-              passed
-      end).
-
-test_receive_in_order() ->
-    with_two_members(
-      fun (Pid, Pid2) ->
-              Numbers = lists:seq(1,1000),
-              [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
-               || N <- Numbers],
-              passed = receive_numbers(
-                         Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
-              passed = receive_numbers(
-                         Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
-              passed = receive_numbers(
-                         Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
-              passed = receive_numbers(
-                         Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
-              passed
-      end).
-
-test_broadcast(Fun) ->
-    with_two_members(test_broadcast_fun(Fun)).
-
-test_broadcast_fun(Fun) ->
-    fun (Pid, Pid2) ->
-            ok = Fun(Pid, magic_message),
-            passed = receive_or_throw({msg, Pid, Pid, magic_message},
-                                      timeout_waiting_for_msg),
-            passed = receive_or_throw({msg, Pid2, Pid, magic_message},
-                                      timeout_waiting_for_msg)
-    end.
-
-with_two_members(Fun) ->
-    ok = gm:create_tables(),
-
-    {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
-                              fun rabbit_misc:execute_mnesia_transaction/1),
-    passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
-
-    {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
-                               fun rabbit_misc:execute_mnesia_transaction/1),
-    passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
-    passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
-
-    passed = Fun(Pid, Pid2),
-
-    ok = gm:leave(Pid),
-    passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
-    passed =
-        receive_termination(Pid, normal, timeout_waiting_for_termination_1),
-
-    ok = gm:leave(Pid2),
-    passed =
-        receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
-
-    receive X -> throw({unexpected_message, X})
-    after 0 -> passed
-    end.
-
-receive_or_throw(Pattern, Error) ->
-    ?RECEIVE_OR_THROW(Pattern, true, Error).
-
-receive_birth(From, Born, Error) ->
-    ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
-                      ([Born] == Birth) andalso ([] == Death),
-                      Error).
-
-receive_death(From, Died, Error) ->
-    ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
-                      ([] == Birth) andalso ([Died] == Death),
-                      Error).
-
-receive_joined(From, Members, Error) ->
-    ?RECEIVE_OR_THROW({joined, From, Members1},
-                      lists:usort(Members) == lists:usort(Members1),
-                      Error).
-
-receive_termination(From, Reason, Error) ->
-    ?RECEIVE_OR_THROW({termination, From, Reason1},
-                      Reason == Reason1,
-                      Error).
-
-receive_numbers(_Pid, _Sender, _Error, []) ->
-    passed;
-receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
-    ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
-                      M == N,
-                      Error),
-    receive_numbers(Pid, Sender, Error, Numbers).
diff --git a/rabbitmq-server/src/lqueue.erl b/rabbitmq-server/src/lqueue.erl
index 787f5088bee763fd6ad52c8832e9026aad232472..4e78346febe94175acd0a7ba56c29e362d5a65d2 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(lqueue).
 
+%% lqueue implements a subset of Erlang's queue module. lqueues
+%% maintain their own length, so lqueue:len/1
+%% is an O(1) operation, in contrast with queue:len/1 which is O(n).
+
 -export([new/0, is_empty/1, len/1, in/2, in_r/2, out/1, out_r/1, join/2,
          foldl/3, foldr/3, from_list/1, to_list/1, peek/1, peek_r/1]).
 
@@ -25,7 +29,7 @@
 
 -export_type([?MODULE/0]).
 
--opaque(?MODULE() :: {non_neg_integer(), ?QUEUE()}).
+-opaque(?MODULE() :: {non_neg_integer(), ?QUEUE:?QUEUE()}).
 -type(value()     :: any()).
 -type(result()    :: 'empty' | {'value', value()}).
 
diff --git a/rabbitmq-server/src/mirrored_supervisor.erl b/rabbitmq-server/src/mirrored_supervisor.erl
index 1ed6d71098188014b994aebb8797aa82877e4f12..96c1418791592f5383f1d2742a9afa50b54fcf2d 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(mirrored_supervisor).
 -define(SUPERVISOR, supervisor2).
 -define(GEN_SERVER, gen_server2).
 -define(PG2,        pg2_fixed).
+-define(SUP_MODULE, mirrored_supervisor_sups).
 
 -define(TABLE, mirrored_sup_childspec).
 -define(TABLE_DEF,
          which_children/1, count_children/1, check_childspecs/1]).
 
 -behaviour(?GEN_SERVER).
--behaviour(?SUPERVISOR).
 
 -export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
          handle_cast/2]).
@@ -221,7 +221,7 @@ start_link({global, _SupName}, _Group, _TxFun, _Mod, _Args) ->
 
 start_link0(Prefix, Group, TxFun, Init) ->
     case apply(?SUPERVISOR, start_link,
-               Prefix ++ [?MODULE, {overall, Group, TxFun, Init}]) of
+               Prefix ++ [?SUP_MODULE, {overall, Group, TxFun, Init}]) of
         {ok, Pid} -> case catch call(Pid, {init, Pid}) of
                          ok -> {ok, Pid};
                          E  -> E
@@ -280,29 +280,12 @@ mirroring(Sup) -> child(Sup, mirroring).
 %%----------------------------------------------------------------------------
 
 start_internal(Group, TxFun, ChildSpecs) ->
-    ?GEN_SERVER:start_link(?MODULE, {mirroring, Group, TxFun, ChildSpecs},
+    ?GEN_SERVER:start_link(?MODULE, {Group, TxFun, ChildSpecs},
                            [{timeout, infinity}]).
 
 %%----------------------------------------------------------------------------
 
-init({overall, _Group, _TxFun, ignore}) -> ignore;
-init({overall,  Group,  TxFun, {ok, {Restart, ChildSpecs}}}) ->
-    %% Important: Delegate MUST start before Mirroring so that when we
-    %% shut down from above it shuts down last, so Mirroring does not
-    %% see it die.
-    %%
-    %% See comment in handle_info('DOWN', ...) below
-    {ok, {{one_for_all, 0, 1},
-          [{delegate, {?SUPERVISOR, start_link, [?MODULE, {delegate, Restart}]},
-            temporary, 16#ffffffff, supervisor, [?SUPERVISOR]},
-           {mirroring, {?MODULE, start_internal, [Group, TxFun, ChildSpecs]},
-            permanent, 16#ffffffff, worker, [?MODULE]}]}};
-
-
-init({delegate, Restart}) ->
-    {ok, {Restart, []}};
-
-init({mirroring, Group, TxFun, ChildSpecs}) ->
+init({Group, TxFun, ChildSpecs}) ->
     {ok, #state{group              = Group,
                 tx_fun             = TxFun,
                 initial_childspecs = ChildSpecs}}.
diff --git a/rabbitmq-server/src/mirrored_supervisor_sups.erl b/rabbitmq-server/src/mirrored_supervisor_sups.erl
new file mode 100644 (file)
index 0000000..404b86f
--- /dev/null
@@ -0,0 +1,43 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(mirrored_supervisor_sups).
+
+-define(SUPERVISOR, supervisor2).
+-define(GS_MODULE,  mirrored_supervisor).
+
+-behaviour(?SUPERVISOR).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+init({overall, _Group, _TxFun, ignore}) -> ignore;
+init({overall,  Group,  TxFun, {ok, {Restart, ChildSpecs}}}) ->
+    %% Important: Delegate MUST start before Mirroring so that when we
+    %% shut down from above it shuts down last, so Mirroring does not
+    %% see it die.
+    %%
+    %% See comment in handle_info('DOWN', ...) in mirrored_supervisor
+    {ok, {{one_for_all, 0, 1},
+          [{delegate, {?SUPERVISOR, start_link, [?MODULE, {delegate, Restart}]},
+            temporary, 16#ffffffff, supervisor, [?SUPERVISOR]},
+           {mirroring, {?GS_MODULE, start_internal, [Group, TxFun, ChildSpecs]},
+            permanent, 16#ffffffff, worker, [?MODULE]}]}};
+
+
+init({delegate, Restart}) ->
+    {ok, {Restart, []}}.
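
Review note (sketch, not from the patch): with the supervisor callbacks split
into this module, mirrored_supervisor:start_link0/4 now does the equivalent of
the call shown below, and mirrored_supervisor itself keeps only the gen_server
side (its -behaviour(?SUPERVISOR) line is dropped above). Names other than the
two module names are placeholders.

    %% What start_link0/4 expands to for a locally registered member:
    %%   supervisor2:start_link({local, Name}, mirrored_supervisor_sups,
    %%                          {overall, Group, TxFun, Mod:init(Args)})
    %%
    %% giving a tree of:
    %%   overall (one_for_all)
    %%   +-- delegate  : supervisor2, callback mirrored_supervisor_sups,
    %%                   holds the real children
    %%   +-- mirroring : gen_server2, callback mirrored_supervisor,
    %%                   started via start_internal/3
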
diff --git a/rabbitmq-server/src/mirrored_supervisor_tests.erl b/rabbitmq-server/src/mirrored_supervisor_tests.erl
deleted file mode 100644 (file)
index 26f59e0..0000000
+++ /dev/null
@@ -1,346 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(mirrored_supervisor_tests).
-
--compile([export_all]).
-
--export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
-         handle_cast/2]).
-
--behaviour(gen_server).
--behaviour(mirrored_supervisor).
-
--define(MS,  mirrored_supervisor).
-
-%% ---------------------------------------------------------------------------
-%% Functional tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
-    passed = test_migrate(),
-    passed = test_migrate_twice(),
-    passed = test_already_there(),
-    passed = test_delete_restart(),
-    passed = test_which_children(),
-    passed = test_large_group(),
-    passed = test_childspecs_at_init(),
-    passed = test_anonymous_supervisors(),
-    passed = test_no_migration_on_shutdown(),
-    passed = test_start_idempotence(),
-    passed = test_unsupported(),
-    passed = test_ignore(),
-    passed = test_startup_failure(),
-    passed.
-
-%% Simplest test
-test_migrate() ->
-    with_sups(fun([A, _]) ->
-                      ?MS:start_child(a, childspec(worker)),
-                      Pid1 = pid_of(worker),
-                      kill_registered(A, Pid1),
-                      Pid2 = pid_of(worker),
-                      false = (Pid1 =:= Pid2)
-              end, [a, b]).
-
-%% Is migration transitive?
-test_migrate_twice() ->
-    with_sups(fun([A, B]) ->
-                      ?MS:start_child(a, childspec(worker)),
-                      Pid1 = pid_of(worker),
-                      kill_registered(A, Pid1),
-                      {ok, C} = start_sup(c),
-                      Pid2 = pid_of(worker),
-                      kill_registered(B, Pid2),
-                      Pid3 = pid_of(worker),
-                      false = (Pid1 =:= Pid3),
-                      kill(C)
-              end, [a, b]).
-
-%% Can't start the same child twice
-test_already_there() ->
-    with_sups(fun([_, _]) ->
-                      S = childspec(worker),
-                      {ok, Pid}                       = ?MS:start_child(a, S),
-                      {error, {already_started, Pid}} = ?MS:start_child(b, S)
-              end, [a, b]).
-
-%% Deleting and restarting should work as per a normal supervisor
-test_delete_restart() ->
-    with_sups(fun([_, _]) ->
-                      S = childspec(worker),
-                      {ok, Pid1} = ?MS:start_child(a, S),
-                      {error, running} = ?MS:delete_child(a, worker),
-                      ok = ?MS:terminate_child(a, worker),
-                      ok = ?MS:delete_child(a, worker),
-                      {ok, Pid2} = ?MS:start_child(b, S),
-                      false = (Pid1 =:= Pid2),
-                      ok = ?MS:terminate_child(b, worker),
-                      {ok, Pid3} = ?MS:restart_child(b, worker),
-                      Pid3 = pid_of(worker),
-                      false = (Pid2 =:= Pid3),
-                      %% Not the same supervisor as the worker is on
-                      ok = ?MS:terminate_child(a, worker),
-                      ok = ?MS:delete_child(a, worker),
-                      {ok, Pid4} = ?MS:start_child(a, S),
-                      false = (Pid3 =:= Pid4)
-              end, [a, b]).
-
-test_which_children() ->
-    with_sups(
-      fun([A, B] = Both) ->
-              ?MS:start_child(A, childspec(worker)),
-              assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
-              ok = ?MS:terminate_child(a, worker),
-              assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
-              {ok, _} = ?MS:restart_child(a, worker),
-              assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
-              ?MS:start_child(B, childspec(worker2)),
-              assert_wc(Both, fun (C) -> 2 = length(C) end)
-      end, [a, b]).
-
-assert_wc(Sups, Fun) ->
-    [Fun(?MS:which_children(Sup)) || Sup <- Sups].
-
-wc_pid(Child) ->
-    {worker, Pid, worker, [mirrored_supervisor_tests]} = Child,
-    Pid.
-
-%% Not all the members of the group should actually do the failover
-test_large_group() ->
-    with_sups(fun([A, _, _, _]) ->
-                      ?MS:start_child(a, childspec(worker)),
-                      Pid1 = pid_of(worker),
-                      kill_registered(A, Pid1),
-                      Pid2 = pid_of(worker),
-                      false = (Pid1 =:= Pid2)
-              end, [a, b, c, d]).
-
-%% Do childspecs work when returned from init?
-test_childspecs_at_init() ->
-    S = childspec(worker),
-    with_sups(fun([A, _]) ->
-                      Pid1 = pid_of(worker),
-                      kill_registered(A, Pid1),
-                      Pid2 = pid_of(worker),
-                      false = (Pid1 =:= Pid2)
-              end, [{a, [S]}, {b, [S]}]).
-
-test_anonymous_supervisors() ->
-    with_sups(fun([A, _B]) ->
-                      ?MS:start_child(A, childspec(worker)),
-                      Pid1 = pid_of(worker),
-                      kill_registered(A, Pid1),
-                      Pid2 = pid_of(worker),
-                      false = (Pid1 =:= Pid2)
-              end, [anon, anon]).
-
-%% When a mirrored_supervisor terminates, we should not migrate, but
-%% the whole supervisor group should shut down. To test this we set up
-%% a situation where the gen_server will only fail if it's running
-%% under the supervisor called 'evil'. It should not migrate to
-%% 'good' and survive, rather the whole group should go away.
-test_no_migration_on_shutdown() ->
-    with_sups(fun([Evil, _]) ->
-                      ?MS:start_child(Evil, childspec(worker)),
-                      try
-                          call(worker, ping, 1000, 100),
-                          exit(worker_should_not_have_migrated)
-                      catch exit:{timeout_waiting_for_server, _, _} ->
-                              ok
-                      end
-              end, [evil, good]).
-
-test_start_idempotence() ->
-    with_sups(fun([_]) ->
-                      CS = childspec(worker),
-                      {ok, Pid}                       = ?MS:start_child(a, CS),
-                      {error, {already_started, Pid}} = ?MS:start_child(a, CS),
-                      ?MS:terminate_child(a, worker),
-                      {error, already_present}        = ?MS:start_child(a, CS)
-              end, [a]).
-
-test_unsupported() ->
-    try
-        ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE,
-                       {sup, one_for_one, []}),
-        exit(no_global)
-    catch error:badarg ->
-            ok
-    end,
-    try
-        ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
-                       {sup, simple_one_for_one, []}),
-        exit(no_sofo)
-    catch error:badarg ->
-            ok
-    end,
-    passed.
-
-%% Just test we don't blow up
-test_ignore() ->
-    ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
-                   {sup, fake_strategy_for_ignore, []}),
-    passed.
-
-test_startup_failure() ->
-    [test_startup_failure(F) || F <- [want_error, want_exit]],
-    passed.
-
-test_startup_failure(Fail) ->
-    process_flag(trap_exit, true),
-    ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE,
-                   {sup, one_for_one, [childspec(Fail)]}),
-    receive
-        {'EXIT', _, shutdown} ->
-            ok
-    after 1000 ->
-            exit({did_not_exit, Fail})
-    end,
-    process_flag(trap_exit, false).
-
-%% ---------------------------------------------------------------------------
-
-with_sups(Fun, Sups) ->
-    inc_group(),
-    Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
-    Fun(Pids),
-    [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
-    timer:sleep(500),
-    passed.
-
-start_sup(Spec) ->
-    start_sup(Spec, group).
-
-start_sup({Name, ChildSpecs}, Group) ->
-    {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
-    %% We are not a supervisor, when we kill the supervisor we do not
-    %% want to die!
-    unlink(Pid),
-    {ok, Pid};
-
-start_sup(Name, Group) ->
-    start_sup({Name, []}, Group).
-
-start_sup0(anon, Group, ChildSpecs) ->
-    ?MS:start_link(Group, fun tx_fun/1, ?MODULE,
-                   {sup, one_for_one, ChildSpecs});
-
-start_sup0(Name, Group, ChildSpecs) ->
-    ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE,
-                   {sup, one_for_one, ChildSpecs}).
-
-childspec(Id) ->
-    {Id, {?MODULE, start_gs, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
-
-start_gs(want_error) ->
-    {error, foo};
-
-start_gs(want_exit) ->
-    exit(foo);
-
-start_gs(Id) ->
-    gen_server:start_link({local, Id}, ?MODULE, server, []).
-
-pid_of(Id) ->
-    {received, Pid, ping} = call(Id, ping),
-    Pid.
-
-tx_fun(Fun) ->
-    case mnesia:sync_transaction(Fun) of
-        {atomic,  Result}         -> Result;
-        {aborted, Reason}         -> throw({error, Reason})
-    end.
-
-inc_group() ->
-    Count = case get(counter) of
-                undefined -> 0;
-                C         -> C
-            end + 1,
-    put(counter, Count).
-
-get_group(Group) ->
-    {Group, get(counter)}.
-
-call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
-
-call(Id, Msg, 0, _Decr) ->
-    exit({timeout_waiting_for_server, {Id, Msg}, erlang:get_stacktrace()});
-
-call(Id, Msg, MaxDelay, Decr) ->
-    try
-        gen_server:call(Id, Msg, infinity)
-    catch exit:_ -> timer:sleep(Decr),
-                    call(Id, Msg, MaxDelay - Decr, Decr)
-    end.
-
-kill(Pid) -> kill(Pid, []).
-kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
-kill(Pid, Waits) ->
-    erlang:monitor(process, Pid),
-    [erlang:monitor(process, P) || P <- Waits],
-    exit(Pid, bang),
-    kill_wait(Pid),
-    [kill_wait(P) || P <- Waits].
-
-kill_registered(Pid, Child) ->
-    {registered_name, Name} = erlang:process_info(Child, registered_name),
-    kill(Pid, Child),
-    false = (Child =:= whereis(Name)),
-    ok.
-
-kill_wait(Pid) ->
-    receive
-        {'DOWN', _Ref, process, Pid, _Reason} ->
-            ok
-    end.
-
-%% ---------------------------------------------------------------------------
-%% Dumb gen_server we can supervise
-%% ---------------------------------------------------------------------------
-
-init({sup, fake_strategy_for_ignore, _ChildSpecs}) ->
-    ignore;
-
-init({sup, Strategy, ChildSpecs}) ->
-    {ok, {{Strategy, 0, 1}, ChildSpecs}};
-
-init(server) ->
-    {ok, state}.
-
-handle_call(Msg, _From, State) ->
-    die_if_my_supervisor_is_evil(),
-    {reply, {received, self(), Msg}, State}.
-
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-die_if_my_supervisor_is_evil() ->
-    try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
-        false -> ok;
-        _     -> exit(doooom)
-    catch
-        exit:{noproc, _} -> ok
-    end.
diff --git a/rabbitmq-server/src/mnesia_sync.erl b/rabbitmq-server/src/mnesia_sync.erl
index 8fa54d6535e2dba34c02ba34d55a140aee949356..153017bca807be092e24fab03f9ee2e847c229fa 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(mnesia_sync).
diff --git a/rabbitmq-server/src/pg_local.erl b/rabbitmq-server/src/pg_local.erl
index f535b1362b30c198a91cd6655f4393114a3862b0..4d9914d9b75f3ea938288cccfe2481778917dcd7 100644 (file)
@@ -34,7 +34,7 @@
 %%
 -module(pg_local).
 
--export([join/2, leave/2, get_members/1]).
+-export([join/2, leave/2, get_members/1, in_group/2]).
 -export([sync/0]). %% intended for testing only; not part of official API
 -export([start/0, start_link/0, init/1, handle_call/3, handle_cast/2,
          handle_info/2, terminate/2]).
@@ -50,6 +50,7 @@
 -spec(join/2 :: (name(), pid()) -> 'ok').
 -spec(leave/2 :: (name(), pid()) -> 'ok').
 -spec(get_members/1 :: (name()) -> [pid()]).
+-spec(in_group/2 :: (name(), pid()) -> boolean()).
 
 -spec(sync/0 :: () -> 'ok').
 
@@ -81,6 +82,16 @@ get_members(Name) ->
     ensure_started(),
     group_members(Name).
 
+in_group(Name, Pid) ->
+    ensure_started(),
+    %% The join message is a cast and thus can race, but we want to
+    %% keep it that way to be fast in the common case.
+    case member_present(Name, Pid) of
+        true  -> true;
+        false -> sync(),
+                 member_present(Name, Pid)
+    end.
+
 sync() ->
     ensure_started(),
     gen_server:call(?MODULE, sync, infinity).
@@ -199,6 +210,12 @@ member_in_group(Pid, Name) ->
     [{{member, Name, Pid}, N}] = ets:lookup(pg_local_table, {member, Name, Pid}),
     lists:duplicate(N, Pid).
 
+member_present(Name, Pid) ->
+    case ets:lookup(pg_local_table, {member, Name, Pid}) of
+        [_] -> true;
+        []  -> false
+    end.
+
 member_groups(Pid) ->
     [Name || [Name] <- ets:match(pg_local_table, {{pid, Pid, '$1'}})].
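
Review note (sketch, not from the patch; assumes a node where pg_local is
running, and my_group is a placeholder name): because join/2 is a cast, a
just-joined member may not yet be in the ETS table, so in_group/2 checks once
and then serialises behind the join via sync/0 before checking again.

    ok   = pg_local:join(my_group, self()),
    true = pg_local:in_group(my_group, self()),   %% may take the sync/0 fallback
    true = lists:member(self(), pg_local:get_members(my_group)),
    ok   = pg_local:leave(my_group, self()).
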
 
diff --git a/rabbitmq-server/src/pmon.erl b/rabbitmq-server/src/pmon.erl
index de3e9fea405a3bc168f566dc43d8b3ac10fda445..f42530022a0e6a2a3f881ceb638fd1b07b9cae87 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(pmon).
 
+%% Process Monitor
+%% ================
+%%
+%% This module monitors processes so that every process has at most
+%% 1 monitor.
+%% Processes monitored can be dynamically added and removed.
+%%
+%% Unlike erlang:[de]monitor* functions, this module
+%% provides basic querying capability and avoids contacting down nodes.
+%%
+%% It is used to monitor nodes, queue mirrors, and by
+%% the queue collector, among other things.
+
 -export([new/0, new/1, monitor/2, monitor_all/2, demonitor/2,
          is_monitored/2, erase/2, monitored/1, is_empty/1]).
 
@@ -29,7 +42,7 @@
 
 -export_type([?MODULE/0]).
 
--opaque(?MODULE() :: #state{dict   :: dict(),
+-opaque(?MODULE() :: #state{dict   :: dict:dict(),
                             module :: atom()}).
 
 -type(item()         :: pid() | {atom(), node()}).
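
Review note (sketch, not from the patch): minimal use of the pmon API the new
comment describes; monitoring is idempotent per item and membership is a local
query.

    Pid = spawn(fun () -> receive stop -> ok end end),
    P0  = pmon:new(),
    P1  = pmon:monitor(Pid, P0),
    P1  = pmon:monitor(Pid, P1),                  %% already monitored: unchanged
    true  = pmon:is_monitored(Pid, P1),
    P2    = pmon:demonitor(Pid, P1),
    false = pmon:is_monitored(Pid, P2).
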
diff --git a/rabbitmq-server/src/priority_queue.erl b/rabbitmq-server/src/priority_queue.erl
index a3573bbd04c29fa814a9bf02e99d9a31f9938a21..88c69513d7d0962980eb3dd33beac11e2e098a46 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% Priority queues have essentially the same interface as ordinary
diff --git a/rabbitmq-server/src/rabbit.erl b/rabbitmq-server/src/rabbit.erl
index 29e38c1f5f7804643ae2e6dc6a4898badd8d7dc0..84aaf4e20ca42f0a15b4b2c27d6cffa5abd2d228 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit).
          stop_and_halt/0, await_startup/0, status/0, is_running/0,
          is_running/1, environment/0, rotate_logs/1, force_event_refresh/1,
          start_fhc/0]).
-
 -export([start/2, stop/1]).
-
--export([log_location/1]). %% for testing
+-export([start_apps/1, stop_apps/1]).
+-export([log_location/1, config_files/0]). %% for testing and mgmt-agent
 
 %%---------------------------------------------------------------------------
 %% Boot steps.
                     {requires,    pre_boot},
                     {enables,     external_infrastructure}]}).
 
+%% rabbit_alarm currently starts memory and disk space monitors
+-rabbit_boot_step({rabbit_alarm,
+                   [{description, "alarm handler"},
+                    {mfa,         {rabbit_alarm, start, []}},
+                    {requires,    pre_boot},
+                    {enables,     external_infrastructure}]}).
+
 -rabbit_boot_step({database,
                    [{mfa,         {rabbit_mnesia, init, []}},
                     {requires,    file_handle_cache},
@@ -55,7 +61,8 @@
 -rabbit_boot_step({file_handle_cache,
                    [{description, "file handle cache server"},
                     {mfa,         {rabbit, start_fhc, []}},
-                    {requires,    pre_boot},
+                    %% FHC needs memory monitor to be running
+                    {requires,    rabbit_alarm},
                     {enables,     worker_pool}]}).
 
 -rabbit_boot_step({worker_pool,
                     {requires,    external_infrastructure},
                     {enables,     kernel_ready}]}).
 
--rabbit_boot_step({rabbit_log,
-                   [{description, "logging server"},
-                    {mfa,         {rabbit_sup, start_restartable_child,
-                                   [rabbit_log]}},
-                    {requires,    external_infrastructure},
-                    {enables,     kernel_ready}]}).
-
 -rabbit_boot_step({rabbit_event,
                    [{description, "statistics event manager"},
                     {mfa,         {rabbit_sup, start_restartable_child,
                    [{description, "kernel ready"},
                     {requires,    external_infrastructure}]}).
 
--rabbit_boot_step({rabbit_alarm,
-                   [{description, "alarm handler"},
-                    {mfa,         {rabbit_alarm, start, []}},
-                    {requires,    kernel_ready},
-                    {enables,     core_initialized}]}).
-
 -rabbit_boot_step({rabbit_memory_monitor,
                    [{description, "memory monitor"},
                     {mfa,         {rabbit_sup, start_restartable_child,
                    [{description, "node monitor"},
                     {mfa,         {rabbit_sup, start_restartable_child,
                                    [rabbit_node_monitor]}},
-                    {requires,    rabbit_alarm},
+                    {requires,    [rabbit_alarm, guid_generator]},
+                    {enables,     core_initialized}]}).
+
+-rabbit_boot_step({rabbit_epmd_monitor,
+                   [{description, "epmd monitor"},
+                    {mfa,         {rabbit_sup, start_restartable_child,
+                                   [rabbit_epmd_monitor]}},
+                    {requires,    kernel_ready},
                     {enables,     core_initialized}]}).
 
 -rabbit_boot_step({core_initialized,
                     {requires,    core_initialized},
                     {enables,     routing_ready}]}).
 
--rabbit_boot_step({mirror_queue_slave_sup,
-                   [{description, "mirror queue slave sup"},
-                    {mfa,         {rabbit_sup, start_supervisor_child,
-                                   [rabbit_mirror_queue_slave_sup]}},
-                    {requires,    recovery},
-                    {enables,     routing_ready}]}).
-
 -rabbit_boot_step({mirrored_queues,
                    [{description, "adding mirrors to queues"},
                     {mfa,         {rabbit_mirror_queue_misc, on_node_up, []}},
-                    {requires,    mirror_queue_slave_sup},
+                    {requires,    recovery},
                     {enables,     routing_ready}]}).
 
 -rabbit_boot_step({routing_ready,
 %% practice 2 processes seems just as fast as any other number > 1,
 %% and keeps the progress bar realistic-ish.
 -define(HIPE_PROCESSES, 2).
+-define(ASYNC_THREADS_WARNING_THRESHOLD, 8).
 
 %%----------------------------------------------------------------------------
 
 %% this really should be an abstract type
 -type(log_location() :: 'tty' | 'undefined' | file:filename()).
 -type(param() :: atom()).
+-type(app_name() :: atom()).
 
 -spec(start/0 :: () -> 'ok').
 -spec(boot/0 :: () -> 'ok').
 -spec(maybe_insert_default_data/0 :: () -> 'ok').
 -spec(boot_delegate/0 :: () -> 'ok').
 -spec(recover/0 :: () -> 'ok').
+-spec(start_apps/1 :: ([app_name()]) -> 'ok').
+-spec(stop_apps/1 :: ([app_name()]) -> 'ok').
 
 -endif.
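
Review note: start_apps/1 and stop_apps/1 become part of the module's public
surface (exported and spec'ed above). A minimal sketch of dynamic plugin
(de)activation on a running node; 'my_plugin' is a placeholder application
name, not something defined in this patch.

    ok = rabbit:start_apps([my_plugin]),   %% load, run its boot steps, start it
    ok = rabbit:stop_apps([my_plugin]).    %% stop it and run its cleanup steps
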
 
@@ -254,16 +252,20 @@ maybe_hipe_compile() ->
     {ok, Want} = application:get_env(rabbit, hipe_compile),
     Can = code:which(hipe) =/= non_existing,
     case {Want, Can} of
-        {true,  true}  -> hipe_compile(),
-                          true;
+        {true,  true}  -> hipe_compile();
         {true,  false} -> false;
-        {false, _}     -> true
+        {false, _}     -> {ok, disabled}
     end.
 
-warn_if_hipe_compilation_failed(true) ->
+log_hipe_result({ok, disabled}) ->
     ok;
-warn_if_hipe_compilation_failed(false) ->
-    error_logger:warning_msg(
+log_hipe_result({ok, Count, Duration}) ->
+    rabbit_log:info(
+      "HiPE in use: compiled ~B modules in ~Bs.~n", [Count, Duration]);
+log_hipe_result(false) ->
+    io:format(
+      "~nNot HiPE compiling: HiPE not found in this Erlang installation.~n"),
+    rabbit_log:warning(
       "Not HiPE compiling: HiPE not found in this Erlang installation.~n").
 
 %% HiPE compilation happens before we have log handlers and can take a
@@ -287,8 +289,9 @@ hipe_compile() ->
          {'DOWN', MRef, process, _, Reason} -> exit(Reason)
      end || {_Pid, MRef} <- PidMRefs],
     T2 = erlang:now(),
-    io:format("|~n~nCompiled ~B modules in ~Bs~n",
-              [Count, timer:now_diff(T2, T1) div 1000000]).
+    Duration = timer:now_diff(T2, T1) div 1000000,
+    io:format("|~n~nCompiled ~B modules in ~Bs~n", [Count, Duration]),
+    {ok, Count, Duration}.
 
 split(L, N) -> split0(L, [[] || _ <- lists:seq(1, N)]).
 
@@ -312,38 +315,34 @@ start() ->
                      ok = ensure_working_log_handlers(),
                      rabbit_node_monitor:prepare_cluster_status_files(),
                      rabbit_mnesia:check_cluster_consistency(),
-                     ok = app_utils:start_applications(
-                            app_startup_order(), fun handle_app_error/2),
-                     ok = log_broker_started(rabbit_plugins:active())
+                     broker_start()
              end).
 
 boot() ->
     start_it(fun() ->
                      ok = ensure_application_loaded(),
-                     Success = maybe_hipe_compile(),
+                     HipeResult = maybe_hipe_compile(),
                      ok = ensure_working_log_handlers(),
-                     warn_if_hipe_compilation_failed(Success),
+                     log_hipe_result(HipeResult),
                      rabbit_node_monitor:prepare_cluster_status_files(),
                      ok = rabbit_upgrade:maybe_upgrade_mnesia(),
                      %% It's important that the consistency check happens after
                      %% the upgrade, since if we are a secondary node the
                      %% primary node will have forgotten us
                      rabbit_mnesia:check_cluster_consistency(),
-                     Plugins = rabbit_plugins:setup(),
-                     ToBeLoaded = Plugins ++ ?APPS,
-                     ok = app_utils:load_applications(ToBeLoaded),
-                     StartupApps = app_utils:app_dependency_order(ToBeLoaded,
-                                                                  false),
-                     ok = app_utils:start_applications(
-                            StartupApps, fun handle_app_error/2),
-                     ok = log_broker_started(Plugins)
+                     broker_start()
              end).
 
-handle_app_error(App, {bad_return, {_MFA, {'EXIT', {Reason, _}}}}) ->
-    throw({could_not_start, App, Reason});
-
-handle_app_error(App, Reason) ->
-    throw({could_not_start, App, Reason}).
+broker_start() ->
+    Plugins = rabbit_plugins:setup(),
+    ToBeLoaded = Plugins ++ ?APPS,
+    start_apps(ToBeLoaded),
+    case code:load_file(sd_notify) of
+        {module, sd_notify} -> SDNotify = sd_notify,
+                               SDNotify:sd_notify(0, "READY=1");
+        {error, _} -> ok
+    end,
+    ok = log_broker_started(rabbit_plugins:active()).
 
 start_it(StartFun) ->
     Marker = spawn_link(fun() -> receive stop -> ok end end),
@@ -354,7 +353,7 @@ start_it(StartFun) ->
                         false -> StartFun()
                     end
                 catch
-                    throw:{could_not_start, _App, _Reason}=Err ->
+                    throw:{could_not_start, _App, _Reason} = Err ->
                         boot_error(Err, not_available);
                     _:Reason ->
                         boot_error(Reason, erlang:get_stacktrace())
@@ -371,22 +370,67 @@ start_it(StartFun) ->
 stop() ->
     case whereis(rabbit_boot) of
         undefined -> ok;
-        _         -> await_startup()
+        _         -> await_startup(true)
     end,
-    rabbit_log:info("Stopping RabbitMQ~n"),
-    ok = app_utils:stop_applications(app_shutdown_order()).
+    rabbit_log:info("Stopping RabbitMQ~n", []),
+    Apps = ?APPS ++ rabbit_plugins:active(),
+    stop_apps(app_utils:app_dependency_order(Apps, true)),
+    rabbit_log:info("Stopped RabbitMQ application~n", []).
 
 stop_and_halt() ->
     try
         stop()
     after
-        rabbit_misc:local_info_msg("Halting Erlang VM~n", []),
+        rabbit_log:info("Halting Erlang VM~n", []),
         init:stop()
     end,
     ok.
 
+start_apps(Apps) ->
+    app_utils:load_applications(Apps),
+    OrderedApps = app_utils:app_dependency_order(Apps, false),
+    case lists:member(rabbit, Apps) of
+        false -> run_boot_steps(Apps); %% plugin activation
+        true  -> ok                    %% will run during start of rabbit app
+    end,
+    ok = app_utils:start_applications(OrderedApps,
+                                      handle_app_error(could_not_start)).
+
+stop_apps(Apps) ->
+    ok = app_utils:stop_applications(
+           Apps, handle_app_error(error_during_shutdown)),
+    case lists:member(rabbit, Apps) of
+        false -> run_cleanup_steps(Apps); %% plugin deactivation
+        true  -> ok                       %% it's all going anyway
+    end,
+    ok.
+
+handle_app_error(Term) ->
+    fun(App, {bad_return, {_MFA, {'EXIT', ExitReason}}}) ->
+            throw({Term, App, ExitReason});
+       (App, Reason) ->
+            throw({Term, App, Reason})
+    end.
+
+run_cleanup_steps(Apps) ->
+    [run_step(Attrs, cleanup) || Attrs <- find_steps(Apps)],
+    ok.
+
 await_startup() ->
-    app_utils:wait_for_applications(app_startup_order()).
+    await_startup(false).
+
+await_startup(HaveSeenRabbitBoot) ->
+    %% We don't take absence of rabbit_boot as evidence we've started,
+    %% since there's a small window before it is registered.
+    case whereis(rabbit_boot) of
+        undefined -> case HaveSeenRabbitBoot orelse is_running() of
+                         true  -> ok;
+                         false -> timer:sleep(100),
+                                  await_startup(false)
+                     end;
+        _         -> timer:sleep(100),
+                     await_startup(true)
+    end.
 
 status() ->
     S1 = [{pid,                  list_to_integer(os:getpid())},
@@ -437,17 +481,25 @@ listeners() ->
                   ip_address = IP,
                   port       = Port} <- Listeners, Node =:= node()].
 
+%% TODO this only determines if the rabbit application has started,
+%% not if it is running, never mind plugins. It would be nice to have
+%% more nuance here.
 is_running() -> is_running(node()).
 
 is_running(Node) -> rabbit_nodes:is_process_running(Node, rabbit).
 
 environment() ->
-    lists:keysort(1, [P || P = {K, _} <- application:get_all_env(rabbit),
-                           K =/= default_pass]).
+    [{A, environment(A)} ||
+        {A, _, _} <- lists:keysort(1, application:which_applications())].
+
+environment(App) ->
+    Ignore = [default_pass, included_applications],
+    lists:keysort(1, [P || P = {K, _} <- application:get_all_env(App),
+                           not lists:member(K, Ignore)]).
 
 rotate_logs(BinarySuffix) ->
     Suffix = binary_to_list(BinarySuffix),
-    rabbit_misc:local_info_msg("Rotating logs with suffix '~s'~n", [Suffix]),
+    rabbit_log:info("Rotating logs with suffix '~s'~n", [Suffix]),
     log_rotation_result(rotate_logs(log_location(kernel),
                                     Suffix,
                                     rabbit_error_logger_file_h),
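
Review note (sketch, not from the patch; assumes a running node): the new
environment/0 reports one proplist per running application rather than only the
rabbit application's environment, still filtering sensitive keys such as
default_pass.

    Env    = rabbit:environment(),
    true   = lists:all(fun ({App, Props}) ->
                               is_atom(App) andalso is_list(Props)
                       end, Env),
    Rabbit = proplists:get_value(rabbit, Env, []),
    false  = lists:keymember(default_pass, 1, Rabbit).
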
@@ -460,15 +512,15 @@ rotate_logs(BinarySuffix) ->
 start(normal, []) ->
     case erts_version_check() of
         ok ->
-            {ok, Vsn} = application:get_key(rabbit, vsn),
-            error_logger:info_msg("Starting RabbitMQ ~s on Erlang ~s~n~s~n~s~n",
-                                  [Vsn, erlang:system_info(otp_release),
-                                   ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]),
+            rabbit_log:info("Starting RabbitMQ ~s on Erlang ~s~n~s~n~s~n",
+                            [rabbit_misc:version(), rabbit_misc:otp_release(),
+                             ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]),
             {ok, SupPid} = rabbit_sup:start_link(),
             true = register(rabbit, self()),
             print_banner(),
             log_banner(),
-            [ok = run_boot_step(Step) || Step <- boot_steps()],
+            warn_if_kernel_config_dubious(),
+            run_boot_steps(),
             {ok, SupPid};
         Error ->
             Error
@@ -483,51 +535,49 @@ stop(_State) ->
     ok.
 
 %%---------------------------------------------------------------------------
-%% application life cycle
+%% boot step logic
 
-app_startup_order() ->
-    ok = app_utils:load_applications(?APPS),
-    app_utils:app_dependency_order(?APPS, false).
+run_boot_steps() ->
+    run_boot_steps([App || {App, _, _} <- application:loaded_applications()]).
 
-app_shutdown_order() ->
-    Apps = ?APPS ++ rabbit_plugins:active(),
-    app_utils:app_dependency_order(Apps, true).
+run_boot_steps(Apps) ->
+    [ok = run_step(Attrs, mfa) || Attrs <- find_steps(Apps)],
+    ok.
 
-%%---------------------------------------------------------------------------
-%% boot step logic
+find_steps(Apps) ->
+    All = sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)),
+    [Attrs || {App, _, Attrs} <- All, lists:member(App, Apps)].
 
-run_boot_step({_StepName, Attributes}) ->
-    case [MFA || {mfa, MFA} <- Attributes] of
+run_step(Attributes, AttributeName) ->
+    case [MFA || {Key, MFA} <- Attributes,
+                 Key =:= AttributeName] of
         [] ->
             ok;
         MFAs ->
-            [try
-                 apply(M,F,A)
-             of
-                 ok ->              ok;
-                 {error, Reason} -> boot_error(Reason, not_available)
-             catch
-                 _:Reason -> boot_error(Reason, erlang:get_stacktrace())
+            [case apply(M,F,A) of
+                 ok              -> ok;
+                 {error, Reason} -> exit({error, Reason})
              end || {M,F,A} <- MFAs],
             ok
     end.
 
-boot_steps() ->
-    sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)).
+vertices({AppName, _Module, Steps}) ->
+    [{StepName, {AppName, StepName, Atts}} || {StepName, Atts} <- Steps].
 
-vertices(_Module, Steps) ->
-    [{StepName, {StepName, Atts}} || {StepName, Atts} <- Steps].
-
-edges(_Module, Steps) ->
+edges({_AppName, _Module, Steps}) ->
+    EnsureList = fun (L) when is_list(L) -> L;
+                     (T)                 -> [T]
+                 end,
     [case Key of
          requires -> {StepName, OtherStep};
          enables  -> {OtherStep, StepName}
      end || {StepName, Atts} <- Steps,
-            {Key, OtherStep} <- Atts,
+            {Key, OtherStepOrSteps} <- Atts,
+            OtherStep <- EnsureList(OtherStepOrSteps),
             Key =:= requires orelse Key =:= enables].
 
 sort_boot_steps(UnsortedSteps) ->
-    case rabbit_misc:build_acyclic_graph(fun vertices/2, fun edges/2,
+    case rabbit_misc:build_acyclic_graph(fun vertices/1, fun edges/1,
                                          UnsortedSteps) of
         {ok, G} ->
             %% Use topological sort to find a consistent ordering (if
@@ -541,52 +591,44 @@ sort_boot_steps(UnsortedSteps) ->
             digraph:delete(G),
             %% Check that all mentioned {M,F,A} triples are exported.
             case [{StepName, {M,F,A}} ||
-                     {StepName, Attributes} <- SortedSteps,
-                     {mfa, {M,F,A}}         <- Attributes,
+                     {_App, StepName, Attributes} <- SortedSteps,
+                     {mfa, {M,F,A}}               <- Attributes,
                      not erlang:function_exported(M, F, length(A))] of
-                []               -> SortedSteps;
-                MissingFunctions -> basic_boot_error(
-                                      {missing_functions, MissingFunctions},
-                                      "Boot step functions not exported: ~p~n",
-                                      [MissingFunctions])
+                []         -> SortedSteps;
+                MissingFns -> exit({boot_functions_not_exported, MissingFns})
             end;
         {error, {vertex, duplicate, StepName}} ->
-            basic_boot_error({duplicate_boot_step, StepName},
-                             "Duplicate boot step name: ~w~n", [StepName]);
+            exit({duplicate_boot_step, StepName});
         {error, {edge, Reason, From, To}} ->
-            basic_boot_error(
-              {invalid_boot_step_dependency, From, To},
-              "Could not add boot step dependency of ~w on ~w:~n~s",
-              [To, From,
-               case Reason of
-                   {bad_vertex, V} ->
-                       io_lib:format("Boot step not registered: ~w~n", [V]);
-                   {bad_edge, [First | Rest]} ->
-                       [io_lib:format("Cyclic dependency: ~w", [First]),
-                        [io_lib:format(" depends on ~w", [Next]) ||
-                            Next <- Rest],
-                        io_lib:format(" depends on ~w~n", [First])]
-               end])
+            exit({invalid_boot_step_dependency, From, To, Reason})
     end.
 
 -ifdef(use_specs).
 -spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()).
 -endif.
-boot_error(Term={error, {timeout_waiting_for_tables, _}}, _Stacktrace) ->
+boot_error({could_not_start, rabbit, {{timeout_waiting_for_tables, _}, _}},
+           _Stacktrace) ->
     AllNodes = rabbit_mnesia:cluster_nodes(all),
+    Suffix = "~nBACKGROUND~n==========~n~n"
+        "This cluster node was shut down while other nodes were still running.~n"
+        "To avoid losing data, you should start the other nodes first, then~n"
+        "start this one. To force this node to start, first invoke~n"
+        "\"rabbitmqctl force_boot\". If you do so, any changes made on other~n"
+        "cluster nodes after this one was shut down may be lost.~n",
     {Err, Nodes} =
         case AllNodes -- [node()] of
             [] -> {"Timeout contacting cluster nodes. Since RabbitMQ was"
                    " shut down forcefully~nit cannot determine which nodes"
-                   " are timing out.~n", []};
+                   " are timing out.~n" ++ Suffix, []};
             Ns -> {rabbit_misc:format(
-                     "Timeout contacting cluster nodes: ~p.~n", [Ns]),
+                     "Timeout contacting cluster nodes: ~p.~n" ++ Suffix, [Ns]),
                    Ns}
         end,
-    basic_boot_error(Term,
-                     Err ++ rabbit_nodes:diagnostics(Nodes) ++ "~n~n", []);
+    log_boot_error_and_exit(
+      timeout_waiting_for_tables,
+      Err ++ rabbit_nodes:diagnostics(Nodes) ++ "~n~n", []);
 boot_error(Reason, Stacktrace) ->
-    Fmt = "Error description:~n   ~p~n~n" ++
+    Fmt = "Error description:~n   ~p~n~n"
         "Log files (may contain more information):~n   ~s~n   ~s~n~n",
     Args = [Reason, log_location(kernel), log_location(sasl)],
     boot_error(Reason, Fmt, Args, Stacktrace).
@@ -596,16 +638,16 @@ boot_error(Reason, Stacktrace) ->
                       -> no_return()).
 -endif.
 boot_error(Reason, Fmt, Args, not_available) ->
-    basic_boot_error(Reason, Fmt, Args);
+    log_boot_error_and_exit(Reason, Fmt, Args);
 boot_error(Reason, Fmt, Args, Stacktrace) ->
-    basic_boot_error(Reason, Fmt ++ "Stack trace:~n   ~p~n~n",
-                     Args ++ [Stacktrace]).
+    log_boot_error_and_exit(Reason, Fmt ++ "Stack trace:~n   ~p~n~n",
+                            Args ++ [Stacktrace]).
 
-basic_boot_error(Reason, Format, Args) ->
+log_boot_error_and_exit(Reason, Format, Args) ->
     io:format("~n~nBOOT FAILED~n===========~n~n" ++ Format, Args),
-    rabbit_misc:local_info_msg(Format, Args),
+    rabbit_log:info(Format, Args),
     timer:sleep(1000),
-    exit({?MODULE, failure_during_boot, Reason}).
+    exit(Reason).
 
 %%---------------------------------------------------------------------------
 %% boot step functions
@@ -622,8 +664,8 @@ recover() ->
     rabbit_amqqueue:start(Qs).
 
 maybe_insert_default_data() ->
-    case rabbit_table:is_empty() of
-        true -> insert_default_data();
+    case rabbit_table:needs_default_data() of
+        true  -> insert_default_data();
         false -> ok
     end.
 
@@ -727,11 +769,11 @@ force_event_refresh(Ref) ->
 %% misc
 
 log_broker_started(Plugins) ->
-    rabbit_misc:with_local_io(
+    rabbit_log:with_local_io(
       fun() ->
               PluginList = iolist_to_binary([rabbit_misc:format(" * ~s~n", [P])
                                              || P <- Plugins]),
-              error_logger:info_msg(
+              rabbit_log:info(
                 "Server startup complete; ~b plugins started.~n~s",
                 [length(Plugins), PluginList]),
               io:format(" completed with ~p plugins.~n", [length(Plugins)])
@@ -780,7 +822,31 @@ log_banner() ->
                     {K, V} ->
                         Format(K, V)
                 end || S <- Settings]),
-    error_logger:info_msg("~s", [Banner]).
+    rabbit_log:info("~s", [Banner]).
+
+warn_if_kernel_config_dubious() ->
+    case erlang:system_info(kernel_poll) of
+        true  -> ok;
+        false -> rabbit_log:warning(
+                   "Kernel poll (epoll, kqueue, etc) is disabled. Throughput "
+                   "and CPU utilization may worsen.~n")
+    end,
+    AsyncThreads = erlang:system_info(thread_pool_size),
+    case AsyncThreads < ?ASYNC_THREADS_WARNING_THRESHOLD of
+        true  -> rabbit_log:warning(
+                   "Erlang VM is running with ~b I/O threads, "
+                   "file I/O performance may worsen~n", [AsyncThreads]);
+        false -> ok
+    end,
+    IDCOpts = case application:get_env(kernel, inet_default_connect_options) of
+                  undefined -> [];
+                  {ok, Val} -> Val
+              end,
+    case proplists:get_value(nodelay, IDCOpts, false) of
+        false -> rabbit_log:warning("Nagle's algorithm is enabled for sockets, "
+                                    "network I/O latency will be higher~n");
+        true  -> ok
+    end.
 
 home_dir() ->
     case init:get_argument(home) of
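
Review note on warn_if_kernel_config_dubious/0 above (sketch, not from the
patch): the three warnings map to standard VM flags and one kernel application
parameter. Starting the VM with kernel poll enabled and at least 8 async
threads (e.g. "erl +K true +A 64") silences the first two; the Erlang config
fragment below silences the Nagle warning.

    [{kernel, [{inet_default_connect_options, [{nodelay, true}]}]}].
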
@@ -819,6 +885,33 @@ config_setting() ->
 %% We don't want this in fhc since it references rabbit stuff. And we can't put
 %% this in the bootstep directly.
 start_fhc() ->
-    rabbit_sup:start_restartable_child(
+    ok = rabbit_sup:start_restartable_child(
       file_handle_cache,
-      [fun rabbit_alarm:set_alarm/1, fun rabbit_alarm:clear_alarm/1]).
+      [fun rabbit_alarm:set_alarm/1, fun rabbit_alarm:clear_alarm/1]),
+    ensure_working_fhc().
+
+ensure_working_fhc() ->
+    %% To test the file handle cache, we simply read a file we know it
+    %% exists (Erlang kernel's .app file).
+    %%
+    %% To avoid any pollution of the application process' dictionary by
+    %% file_handle_cache, we spawn a separate process.
+    Parent = self(),
+    TestFun = fun() ->
+        Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"),
+        {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []),
+        {ok, _} = file_handle_cache:read(Fd, 1),
+        ok = file_handle_cache:close(Fd),
+        Parent ! fhc_ok
+    end,
+    TestPid = spawn_link(TestFun),
+    %% Because we are waiting for the test fun, abuse the
+    %% 'mnesia_table_loading_timeout' parameter to find a sane timeout
+    %% value.
+    Timeout = rabbit_table:wait_timeout(),
+    receive
+        fhc_ok                       -> ok;
+        {'EXIT', TestPid, Exception} -> throw({ensure_working_fhc, Exception})
+    after Timeout ->
+            throw({ensure_working_fhc, {timeout, TestPid}})
+    end.
diff --git a/rabbitmq-server/src/rabbit_access_control.erl b/rabbitmq-server/src/rabbit_access_control.erl
index b0a9c0d807b2e5a698fcdcda71aa7c1e67e402ca..fc7a59c7433f255e73d3f1367dabd6505048426b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_access_control).
@@ -19,7 +19,7 @@
 -include("rabbit.hrl").
 
 -export([check_user_pass_login/2, check_user_login/2, check_user_loopback/2,
-         check_vhost_access/2, check_resource_access/3]).
+         check_vhost_access/3, check_resource_access/3]).
 
 %%----------------------------------------------------------------------------
 
 
 -spec(check_user_pass_login/2 ::
         (rabbit_types:username(), rabbit_types:password())
-        -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
+        -> {'ok', rabbit_types:user()} |
+           {'refused', rabbit_types:username(), string(), [any()]}).
 -spec(check_user_login/2 ::
         (rabbit_types:username(), [{atom(), any()}])
-        -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
+        -> {'ok', rabbit_types:user()} |
+           {'refused', rabbit_types:username(), string(), [any()]}).
 -spec(check_user_loopback/2 :: (rabbit_types:username(),
                                 rabbit_net:socket() | inet:ip_address())
         -> 'ok' | 'not_allowed').
--spec(check_vhost_access/2 ::
-        (rabbit_types:user(), rabbit_types:vhost())
+-spec(check_vhost_access/3 ::
+        (rabbit_types:user(), rabbit_types:vhost(), rabbit_net:socket())
         -> 'ok' | rabbit_types:channel_exit()).
 -spec(check_resource_access/3 ::
         (rabbit_types:user(), rabbit_types:r(atom()), permission_atom())
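
Review note on the check_user_login/2 hunk below (sketch, not from the patch):
the authZ half of an auth_backends entry may now be a list of modules (the
ModZs0 is_atom/is_list case). A configuration exercising that shape, using the
stock internal and LDAP backend module names (whether the LDAP plugin is
enabled here is an assumption):

    [{rabbit, [{auth_backends,
                [{rabbit_auth_backend_ldap, [rabbit_auth_backend_ldap,
                                             rabbit_auth_backend_internal]}]}]}].
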
@@ -55,36 +57,71 @@ check_user_pass_login(Username, Password) ->
 check_user_login(Username, AuthProps) ->
     {ok, Modules} = application:get_env(rabbit, auth_backends),
     R = lists:foldl(
-          fun ({ModN, ModZ}, {refused, _, _}) ->
+          fun ({ModN, ModZs0}, {refused, _, _, _}) ->
+                  ModZs = case ModZs0 of
+                              A when is_atom(A) -> [A];
+                              L when is_list(L) -> L
+                          end,
                   %% Different modules for authN vs authZ. So authenticate
                   %% with authN module, then if that succeeds do
-                  %% passwordless (i.e pre-authenticated) login with authZ
-                  %% module, and use the #user{} the latter gives us.
-                  case try_login(ModN, Username, AuthProps) of
-                      {ok, _} -> try_login(ModZ, Username, []);
-                      Else    -> Else
+                  %% passwordless (i.e. pre-authenticated) login with authZ.
+                  case try_authenticate(ModN, Username, AuthProps) of
+                      {ok, ModNUser = #auth_user{username = Username2}} ->
+                          user(ModNUser, try_authorize(ModZs, Username2));
+                      Else ->
+                          Else
                   end;
-              (Mod, {refused, _, _}) ->
+              (Mod, {refused, _, _, _}) ->
                   %% Same module for authN and authZ. Just take the result
                   %% it gives us
-                  try_login(Mod, Username, AuthProps);
+                  case try_authenticate(Mod, Username, AuthProps) of
+                      {ok, ModNUser = #auth_user{impl = Impl}} ->
+                          user(ModNUser, {ok, [{Mod, Impl}]});
+                      Else ->
+                          Else
+                  end;
               (_, {ok, User}) ->
                   %% We've successfully authenticated. Skip to the end...
                   {ok, User}
-          end, {refused, "No modules checked '~s'", [Username]}, Modules),
-    rabbit_event:notify(case R of
-                            {ok, _User} -> user_authentication_success;
-                            _           -> user_authentication_failure
-                        end, [{name, Username}]),
+          end,
+          {refused, Username, "No modules checked '~s'", [Username]}, Modules),
     R.
 
-try_login(Module, Username, AuthProps) ->
-    case Module:check_user_login(Username, AuthProps) of
-        {error, E} -> {refused, "~s failed authenticating ~s: ~p~n",
-                       [Module, Username, E]};
-        Else       -> Else
+try_authenticate(Module, Username, AuthProps) ->
+    case Module:user_login_authentication(Username, AuthProps) of
+        {ok, AuthUser}  -> {ok, AuthUser};
+        {error, E}      -> {refused, Username,
+                            "~s failed authenticating ~s: ~p~n",
+                            [Module, Username, E]};
+        {refused, F, A} -> {refused, Username, F, A}
     end.
 
+try_authorize(Modules, Username) ->
+    lists:foldr(
+      fun (Module, {ok, ModsImpls}) ->
+              case Module:user_login_authorization(Username) of
+                  {ok, Impl}      -> {ok, [{Module, Impl} | ModsImpls]};
+                  {error, E}      -> {refused, Username,
+                                        "~s failed authorizing ~s: ~p~n",
+                                        [Module, Username, E]};
+                  {refused, F, A} -> {refused, Username, F, A}
+              end;
+          (_,      {refused, F, A}) ->
+              {refused, Username, F, A}
+      end, {ok, []}, Modules).
+
+user(#auth_user{username = Username, tags = Tags}, {ok, ModZImpls}) ->
+    {ok, #user{username       = Username,
+               tags           = Tags,
+               authz_backends = ModZImpls}};
+user(_AuthUser, Error) ->
+    Error.
+
+auth_user(#user{username = Username, tags = Tags}, Impl) ->
+    #auth_user{username = Username,
+               tags     = Tags,
+               impl     = Impl}.
+
 check_user_loopback(Username, SockOrAddr) ->
     {ok, Users} = application:get_env(rabbit, loopback_users),
     case rabbit_net:is_loopback(SockOrAddr)
@@ -93,29 +130,38 @@ check_user_loopback(Username, SockOrAddr) ->
         false -> not_allowed
     end.
 
-check_vhost_access(User = #user{ username     = Username,
-                                 auth_backend = Module }, VHostPath) ->
-    check_access(
-      fun() ->
-              %% TODO this could be an andalso shortcut under >R13A
-              case rabbit_vhost:exists(VHostPath) of
-                  false -> false;
-                  true  -> Module:check_vhost_access(User, VHostPath)
-              end
-      end,
-      Module, "access to vhost '~s' refused for user '~s'",
-      [VHostPath, Username]).
+check_vhost_access(User = #user{username       = Username,
+                                authz_backends = Modules}, VHostPath, Sock) ->
+    lists:foldl(
+      fun({Mod, Impl}, ok) ->
+              check_access(
+                fun() ->
+                        rabbit_vhost:exists(VHostPath) andalso
+                            Mod:check_vhost_access(
+                              auth_user(User, Impl), VHostPath, Sock)
+                end,
+                Mod, "access to vhost '~s' refused for user '~s'",
+                [VHostPath, Username]);
+         (_, Else) ->
+              Else
+      end, ok, Modules).
 
 check_resource_access(User, R = #resource{kind = exchange, name = <<"">>},
                       Permission) ->
     check_resource_access(User, R#resource{name = <<"amq.default">>},
                           Permission);
-check_resource_access(User = #user{username = Username, auth_backend = Module},
+check_resource_access(User = #user{username       = Username,
+                                   authz_backends = Modules},
                       Resource, Permission) ->
-    check_access(
-      fun() -> Module:check_resource_access(User, Resource, Permission) end,
-      Module, "access to ~s refused for user '~s'",
-      [rabbit_misc:rs(Resource), Username]).
+    lists:foldl(
+      fun({Module, Impl}, ok) ->
+              check_access(
+                fun() -> Module:check_resource_access(
+                           auth_user(User, Impl), Resource, Permission) end,
+                Module, "access to ~s refused for user '~s'",
+                [rabbit_misc:rs(Resource), Username]);
+         (_, Else) -> Else
+      end, ok, Modules).
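
For context (not part of this change): the auth_backends value that
check_user_login/2 (earlier in this file) folds over comes from the rabbit
application environment, and after this patch each entry may be a single
module, an {AuthN, AuthZ} pair, or an {AuthN, [AuthZ, ...]} pair. A plausible
rabbitmq.config sketch, assuming the stock internal and LDAP backends are
available:

    [
     {rabbit,
      [{auth_backends,
        [%% same module handles both authN and authZ:
         rabbit_auth_backend_internal,
         %% authN via LDAP, authZ via the internal database:
         {rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]}]}
    ].
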
 
 check_access(Fun, Module, ErrStr, ErrArgs) ->
     Allow = case Fun() of
index 308f9a2e0a907acf70322e7fe503a69073e935ee..557fa31335d236e503040218793344a1fa55d295 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_alarm).
index 1aba7ecbd154c9cbf15bf9ce41c69337a39017e0..5bfa006e09051529857be7b2aead608866f2eb02 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_amqqueue).
 
 -export([recover/0, stop/0, start/1, declare/5, declare/6,
-         delete_immediately/1, delete/3, purge/1, forget_all_durable/1]).
--export([pseudo_queue/2]).
+         delete_immediately/1, delete/3, purge/1, forget_all_durable/1,
+         delete_crashed/1, delete_crashed_internal/1]).
+-export([pseudo_queue/2, immutable/1]).
 -export([lookup/1, not_found_or_absent/1, with/2, with/3, with_or_die/2,
          assert_equivalence/5,
          check_exclusive_access/2, with_exclusive_access_or_die/3,
-         stat/1, deliver/2, deliver_flow/2, requeue/3, ack/3, reject/4]).
+         stat/1, deliver/2, requeue/3, ack/3, reject/4]).
 -export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]).
+-export([list_down/1]).
 -export([force_event_refresh/1, notify_policy_changed/1]).
 -export([consumers/1, consumers_all/1, consumer_info_keys/0]).
 -export([basic_get/4, basic_consume/10, basic_cancel/4, notify_decorators/1]).
 -export([notify_sent/2, notify_sent_queue_down/1, resume/2]).
 -export([notify_down_all/2, activate_limit_all/2, credit/5]).
--export([on_node_down/1]).
--export([update/2, store_queue/1, policy_changed/2]).
+-export([on_node_up/1, on_node_down/1]).
+-export([update/2, store_queue/1, update_decorators/1, policy_changed/2]).
 -export([start_mirroring/1, stop_mirroring/1, sync_mirrors/1,
          cancel_sync_mirrors/1]).
 
@@ -49,7 +51,7 @@
 
 -ifdef(use_specs).
 
--export_type([name/0, qmsg/0]).
+-export_type([name/0, qmsg/0, absent_reason/0]).
 
 -type(name() :: rabbit_types:r('queue')).
 -type(qpids() :: [pid()]).
 -type(msg_id() :: non_neg_integer()).
 -type(ok_or_errors() ::
         'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}).
+-type(absent_reason() :: 'nodedown' | 'crashed').
 -type(queue_or_absent() :: rabbit_types:amqqueue() |
-                           {'absent', rabbit_types:amqqueue()}).
--type(not_found_or_absent() :: 'not_found' |
-                               {'absent', rabbit_types:amqqueue()}).
+                           {'absent', rabbit_types:amqqueue(),absent_reason()}).
+-type(not_found_or_absent() ::
+        'not_found' | {'absent', rabbit_types:amqqueue(), absent_reason()}).
 -spec(recover/0 :: () -> [rabbit_types:amqqueue()]).
 -spec(stop/0 :: () -> 'ok').
 -spec(start/1 :: ([rabbit_types:amqqueue()]) -> 'ok').
@@ -74,8 +77,9 @@
 -spec(declare/6 ::
         (name(), boolean(), boolean(),
          rabbit_framing:amqp_table(), rabbit_types:maybe(pid()), node())
-        -> {'new' | 'existing' | 'absent' | 'owner_died',
-            rabbit_types:amqqueue()} | rabbit_types:channel_exit()).
+        -> {'new' | 'existing' | 'owner_died', rabbit_types:amqqueue()} |
+           {'absent', rabbit_types:amqqueue(), absent_reason()} |
+           rabbit_types:channel_exit()).
 -spec(internal_declare/2 ::
         (rabbit_types:amqqueue(), boolean())
         -> queue_or_absent() | rabbit_misc:thunk(queue_or_absent())).
         (name(), pid(), qfun(A)) -> A | rabbit_types:channel_exit()).
 -spec(list/0 :: () -> [rabbit_types:amqqueue()]).
 -spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
+-spec(list_down/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
 -spec(info_keys/0 :: () -> rabbit_types:info_keys()).
 -spec(info/1 :: (rabbit_types:amqqueue()) -> rabbit_types:infos()).
 -spec(info/2 ::
         -> qlen() |
            rabbit_types:error('in_use') |
            rabbit_types:error('not_empty')).
+-spec(delete_crashed/1 :: (rabbit_types:amqqueue()) -> 'ok').
+-spec(delete_crashed_internal/1 :: (rabbit_types:amqqueue()) -> 'ok').
 -spec(purge/1 :: (rabbit_types:amqqueue()) -> qlen()).
 -spec(forget_all_durable/1 :: (node()) -> 'ok').
 -spec(deliver/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
                         qpids()).
--spec(deliver_flow/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
-                             qpids()).
 -spec(requeue/3 :: (pid(), [msg_id()],  pid()) -> 'ok').
 -spec(ack/3 :: (pid(), [msg_id()], pid()) -> 'ok').
 -spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok').
          (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok').
 -spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok').
 -spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
+-spec(on_node_up/1 :: (node()) -> 'ok').
 -spec(on_node_down/1 :: (node()) -> 'ok').
 -spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()).
+-spec(immutable/1 :: (rabbit_types:amqqueue()) -> rabbit_types:amqqueue()).
 -spec(store_queue/1 :: (rabbit_types:amqqueue()) -> 'ok').
+-spec(update_decorators/1 :: (name()) -> 'ok').
 -spec(policy_changed/2 ::
         (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
 -spec(start_mirroring/1 :: (pid()) -> 'ok').
@@ -206,14 +214,14 @@ recover() ->
         BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]),
     {ok,_} = supervisor:start_child(
                rabbit_sup,
-               {rabbit_amqqueue_sup,
-                {rabbit_amqqueue_sup, start_link, []},
-                transient, infinity, supervisor, [rabbit_amqqueue_sup]}),
+               {rabbit_amqqueue_sup_sup,
+                {rabbit_amqqueue_sup_sup, start_link, []},
+                transient, infinity, supervisor, [rabbit_amqqueue_sup_sup]}),
     recover_durable_queues(lists:zip(DurableQueues, OrderedRecoveryTerms)).
 
 stop() ->
-    ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup),
-    ok = supervisor:delete_child(rabbit_sup, rabbit_amqqueue_sup),
+    ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup_sup),
+    ok = supervisor:delete_child(rabbit_sup, rabbit_amqqueue_sup_sup),
     {ok, BQ} = application:get_env(rabbit, backing_queue_module),
     ok = BQ:stop().
 
@@ -238,9 +246,9 @@ find_durable_queues() ->
 
 recover_durable_queues(QueuesAndRecoveryTerms) ->
     {Results, Failures} =
-        gen_server2:mcall([{start_queue_process(node(), Q),
-                            {init, {self(), Terms}}} ||
-                              {Q, Terms} <- QueuesAndRecoveryTerms]),
+        gen_server2:mcall(
+          [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery),
+            {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
     [rabbit_log:error("Queue ~p failed to initialise: ~p~n",
                       [Pid, Error]) || {Pid, Error} <- Failures],
     [Q || {_, {new, Q}} <- Results].
@@ -254,39 +262,44 @@ declare(QueueName, Durable, AutoDelete, Args, Owner) ->
 %% effect) this might not be possible to satisfy.
 declare(QueueName, Durable, AutoDelete, Args, Owner, Node) ->
     ok = check_declare_arguments(QueueName, Args),
-    Q = rabbit_policy:set(#amqqueue{name            = QueueName,
-                                    durable         = Durable,
-                                    auto_delete     = AutoDelete,
-                                    arguments       = Args,
-                                    exclusive_owner = Owner,
-                                    pid             = none,
-                                    slave_pids      = [],
-                                    sync_slave_pids = [],
-                                    gm_pids         = []}),
+    Q = rabbit_queue_decorator:set(
+          rabbit_policy:set(#amqqueue{name               = QueueName,
+                                      durable            = Durable,
+                                      auto_delete        = AutoDelete,
+                                      arguments          = Args,
+                                      exclusive_owner    = Owner,
+                                      pid                = none,
+                                      slave_pids         = [],
+                                      sync_slave_pids    = [],
+                                      recoverable_slaves = [],
+                                      gm_pids            = [],
+                                      state              = live})),
     Node = rabbit_mirror_queue_misc:initial_queue_node(Q, Node),
-    gen_server2:call(start_queue_process(Node, Q), {init, new}, infinity).
+    gen_server2:call(
+      rabbit_amqqueue_sup_sup:start_queue_process(Node, Q, declare),
+      {init, new}, infinity).
 
 internal_declare(Q, true) ->
     rabbit_misc:execute_mnesia_tx_with_tail(
-      fun () -> ok = store_queue(Q), rabbit_misc:const(Q) end);
+      fun () ->
+              ok = store_queue(Q#amqqueue{state = live}),
+              rabbit_misc:const(Q)
+      end);
 internal_declare(Q = #amqqueue{name = QueueName}, false) ->
     rabbit_misc:execute_mnesia_tx_with_tail(
       fun () ->
               case mnesia:wread({rabbit_queue, QueueName}) of
                   [] ->
                       case not_found_or_absent(QueueName) of
-                          not_found        -> Q1 = rabbit_policy:set(Q),
-                                              ok = store_queue(Q1),
-                                              B = add_default_binding(Q1),
-                                              fun () -> B(), Q1 end;
-                          {absent, _Q} = R -> rabbit_misc:const(R)
+                          not_found           -> Q1 = rabbit_policy:set(Q),
+                                                 Q2 = Q1#amqqueue{state = live},
+                                                 ok = store_queue(Q2),
+                                                 B = add_default_binding(Q1),
+                                                 fun () -> B(), Q1 end;
+                          {absent, _Q, _} = R -> rabbit_misc:const(R)
                       end;
-                  [ExistingQ = #amqqueue{pid = QPid}] ->
-                      case rabbit_misc:is_process_alive(QPid) of
-                          true  -> rabbit_misc:const(ExistingQ);
-                          false -> TailFun = internal_delete(QueueName),
-                                   fun () -> TailFun(), ExistingQ end
-                      end
+                  [ExistingQ] ->
+                      rabbit_misc:const(ExistingQ)
               end
       end).
 
@@ -308,12 +321,24 @@ store_queue(Q = #amqqueue{durable = true}) ->
     ok = mnesia:write(rabbit_durable_queue,
                       Q#amqqueue{slave_pids      = [],
                                  sync_slave_pids = [],
-                                 gm_pids         = []}, write),
-    ok = mnesia:write(rabbit_queue, Q, write),
-    ok;
+                                 gm_pids         = [],
+                                 decorators      = undefined}, write),
+    store_queue_ram(Q);
 store_queue(Q = #amqqueue{durable = false}) ->
-    ok = mnesia:write(rabbit_queue, Q, write),
-    ok.
+    store_queue_ram(Q).
+
+store_queue_ram(Q) ->
+    ok = mnesia:write(rabbit_queue, rabbit_queue_decorator:set(Q), write).
+
+update_decorators(Name) ->
+    rabbit_misc:execute_mnesia_transaction(
+      fun() ->
+              case mnesia:wread({rabbit_queue, Name}) of
+                  [Q] -> store_queue_ram(Q),
+                         ok;
+                  []  -> ok
+              end
+      end).
 
 policy_changed(Q1 = #amqqueue{decorators = Decorators1},
                Q2 = #amqqueue{decorators = Decorators2}) ->
@@ -325,10 +350,6 @@ policy_changed(Q1 = #amqqueue{decorators = Decorators1},
     %% mirroring-related has changed - the policy may have changed anyway.
     notify_policy_changed(Q1).
 
-start_queue_process(Node, Q) ->
-    {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, [Q]),
-    Pid.
-
 add_default_binding(#amqqueue{name = QueueName}) ->
     ExchangeName = rabbit_misc:r(QueueName, exchange, <<>>),
     RoutingKey = QueueName#resource.name,
@@ -351,7 +372,7 @@ not_found_or_absent(Name) ->
     %% rabbit_queue and not found anything
     case mnesia:read({rabbit_durable_queue, Name}) of
         []  -> not_found;
-        [Q] -> {absent, Q} %% Q exists on stopped node
+        [Q] -> {absent, Q, nodedown} %% Q exists on stopped node
     end.
 
 not_found_or_absent_dirty(Name) ->
@@ -360,11 +381,13 @@ not_found_or_absent_dirty(Name) ->
     %% and only affect the error kind.
     case rabbit_misc:dirty_read({rabbit_durable_queue, Name}) of
         {error, not_found} -> not_found;
-        {ok, Q}            -> {absent, Q}
+        {ok, Q}            -> {absent, Q, nodedown}
     end.
 
 with(Name, F, E) ->
     case lookup(Name) of
+        {ok, Q = #amqqueue{state = crashed}} ->
+            E({absent, Q, crashed});
         {ok, Q = #amqqueue{pid = QPid}} ->
             %% We check is_process_alive(QPid) in case we receive a
             %% nodedown (for example) in F() that has nothing to do
@@ -373,7 +396,7 @@ with(Name, F, E) ->
             %% indicates a code bug and we don't want to get stuck in
             %% the retry loop.
             rabbit_misc:with_exit_handler(
-              fun () -> false = rabbit_misc:is_process_alive(QPid),
+              fun () -> false = rabbit_mnesia:is_process_alive(QPid),
                         timer:sleep(25),
                         with(Name, F, E)
               end, fun () -> F(Q) end);
@@ -384,20 +407,18 @@ with(Name, F, E) ->
 with(Name, F) -> with(Name, F, fun (E) -> {error, E} end).
 
 with_or_die(Name, F) ->
-    with(Name, F, fun (not_found)   -> rabbit_misc:not_found(Name);
-                      ({absent, Q}) -> rabbit_misc:absent(Q)
+    with(Name, F, fun (not_found)           -> rabbit_misc:not_found(Name);
+                      ({absent, Q, Reason}) -> rabbit_misc:absent(Q, Reason)
                   end).
 
-assert_equivalence(#amqqueue{durable     = Durable,
-                             auto_delete = AutoDelete} = Q,
-                   Durable, AutoDelete, RequiredArgs, Owner) ->
-    assert_args_equivalence(Q, RequiredArgs),
-    check_exclusive_access(Q, Owner, strict);
-assert_equivalence(#amqqueue{name = QueueName},
-                   _Durable, _AutoDelete, _RequiredArgs, _Owner) ->
-    rabbit_misc:protocol_error(
-      precondition_failed, "parameters for ~s not equivalent",
-      [rabbit_misc:rs(QueueName)]).
+assert_equivalence(#amqqueue{name        = QName,
+                             durable     = Durable,
+                             auto_delete = AD} = Q,
+                   Durable1, AD1, Args1, Owner) ->
+    rabbit_misc:assert_field_equivalence(Durable, Durable1, QName, durable),
+    rabbit_misc:assert_field_equivalence(AD, AD1, QName, auto_delete),
+    assert_args_equivalence(Q, Args1),
+    check_exclusive_access(Q, Owner, strict).
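
Where the old second clause reported a generic "parameters for ~s not
equivalent", the per-field helper can name the offending field. A hedged
illustration of a mismatched redeclare (the exact wording comes from
rabbit_misc:assert_field_equivalence, not from this hunk):

    %% Re-declaring an existing durable queue with durable = false now fails
    %% with a precondition_failed error that names the field, roughly
    %% "inequivalent arg 'durable' for <queue> ...", instead of the old
    %% blanket "parameters for <queue> not equivalent".
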
 
 check_exclusive_access(Q, Owner) -> check_exclusive_access(Q, Owner, lax).
 
@@ -443,8 +464,11 @@ check_arguments(QueueName, Args, Validators) ->
 declare_args() ->
     [{<<"x-expires">>,                 fun check_expires_arg/2},
      {<<"x-message-ttl">>,             fun check_message_ttl_arg/2},
+     {<<"x-dead-letter-exchange">>,    fun check_dlxname_arg/2},
      {<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2},
-     {<<"x-max-length">>,              fun check_non_neg_int_arg/2}].
+     {<<"x-max-length">>,              fun check_non_neg_int_arg/2},
+     {<<"x-max-length-bytes">>,        fun check_non_neg_int_arg/2},
+     {<<"x-max-priority">>,            fun check_non_neg_int_arg/2}].
 
 consume_args() -> [{<<"x-priority">>,              fun check_int_arg/2},
                    {<<"x-cancel-on-ha-failover">>, fun check_bool_arg/2}].
@@ -478,6 +502,11 @@ check_message_ttl_arg({Type, Val}, Args) ->
         Error -> Error
     end.
 
+%% Note that the validity of x-dead-letter-exchange is already verified
+%% by rabbit_channel's queue.declare handler.
+check_dlxname_arg({longstr, _}, _) -> ok;
+check_dlxname_arg({Type,    _}, _) -> {error, {unacceptable_type, Type}}.
+
 check_dlxrk_arg({longstr, _}, Args) ->
     case rabbit_misc:table_lookup(Args, <<"x-dead-letter-exchange">>) of
         undefined -> {error, routing_key_but_no_dlx_defined};
@@ -488,32 +517,68 @@ check_dlxrk_arg({Type,    _}, _Args) ->
 
 list() -> mnesia:dirty_match_object(rabbit_queue, #amqqueue{_ = '_'}).
 
+list(VHostPath) -> list(VHostPath, rabbit_queue).
+
 %% Not dirty_match_object since that would not be transactional when used in a
 %% tx context
-list(VHostPath) ->
+list(VHostPath, TableName) ->
     mnesia:async_dirty(
       fun () ->
               mnesia:match_object(
-                rabbit_queue,
+                TableName,
                 #amqqueue{name = rabbit_misc:r(VHostPath, queue), _ = '_'},
                 read)
       end).
 
+list_down(VHostPath) ->
+    Present = list(VHostPath),
+    Durable = list(VHostPath, rabbit_durable_queue),
+    PresentS = sets:from_list([N || #amqqueue{name = N} <- Present]),
+    sets:to_list(sets:filter(fun (#amqqueue{name = N}) ->
+                                     not sets:is_element(N, PresentS)
+                             end, sets:from_list(Durable))).
+
 info_keys() -> rabbit_amqqueue_process:info_keys().
 
-map(VHostPath, F) -> rabbit_misc:filter_exit_map(F, list(VHostPath)).
+map(Qs, F) -> rabbit_misc:filter_exit_map(F, Qs).
 
+info(Q = #amqqueue{ state = crashed }) -> info_down(Q, crashed);
 info(#amqqueue{ pid = QPid }) -> delegate:call(QPid, info).
 
+info(Q = #amqqueue{ state = crashed }, Items) ->
+    info_down(Q, Items, crashed);
 info(#amqqueue{ pid = QPid }, Items) ->
     case delegate:call(QPid, {info, Items}) of
         {ok, Res}      -> Res;
         {error, Error} -> throw(Error)
     end.
 
-info_all(VHostPath) -> map(VHostPath, fun (Q) -> info(Q) end).
+info_down(Q, DownReason) ->
+    info_down(Q, rabbit_amqqueue_process:info_keys(), DownReason).
+
+info_down(Q, Items, DownReason) ->
+    [{Item, i_down(Item, Q, DownReason)} || Item <- Items].
+
+i_down(name,               #amqqueue{name               = Name}, _) -> Name;
+i_down(durable,            #amqqueue{durable            = Dur},  _) -> Dur;
+i_down(auto_delete,        #amqqueue{auto_delete        = AD},   _) -> AD;
+i_down(arguments,          #amqqueue{arguments          = Args}, _) -> Args;
+i_down(pid,                #amqqueue{pid                = QPid}, _) -> QPid;
+i_down(recoverable_slaves, #amqqueue{recoverable_slaves = RS},   _) -> RS;
+i_down(state, _Q, DownReason)                                     -> DownReason;
+i_down(K, _Q, _DownReason) ->
+    case lists:member(K, rabbit_amqqueue_process:info_keys()) of
+        true  -> '';
+        false -> throw({bad_argument, K})
+    end.
+
+info_all(VHostPath) ->
+    map(list(VHostPath), fun (Q) -> info(Q) end) ++
+        map(list_down(VHostPath), fun (Q) -> info_down(Q, down) end).
 
-info_all(VHostPath, Items) -> map(VHostPath, fun (Q) -> info(Q, Items) end).
+info_all(VHostPath, Items) ->
+    map(list(VHostPath), fun (Q) -> info(Q, Items) end) ++
+        map(list_down(VHostPath), fun (Q) -> info_down(Q, Items, down) end).
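
Together with list_down/1 above, info_all/1,2 now also report durable queues
whose process is not running; for those the rows come from info_down/3 rather
than a delegate call to the queue process. An illustrative (invented) result
for a crashed queue:

    %% rabbit_amqqueue:info(Q, [name, durable, pid, state]) when Q is crashed:
    [{name,    rabbit_misc:r(<<"/">>, queue, <<"orders">>)},
     {durable, true},
     {pid,     DeadQPid},   %% last known pid, no longer alive
     {state,   crashed}]    %% 'down' when the row comes via info_all/1,2
    %% any other key accepted by info_keys() is answered with ''.
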
 
 force_event_refresh(Ref) ->
     [gen_server2:cast(Q#amqqueue.pid,
@@ -530,7 +595,7 @@ consumer_info_keys() -> ?CONSUMER_INFO_KEYS.
 consumers_all(VHostPath) ->
     ConsumerInfoKeys=consumer_info_keys(),
     lists:append(
-      map(VHostPath,
+      map(list(VHostPath),
           fun (Q) ->
               [lists:zip(
                  ConsumerInfoKeys,
@@ -547,11 +612,15 @@ delete_immediately(QPids) ->
 delete(#amqqueue{ pid = QPid }, IfUnused, IfEmpty) ->
     delegate:call(QPid, {delete, IfUnused, IfEmpty}).
 
-purge(#amqqueue{ pid = QPid }) -> delegate:call(QPid, purge).
+delete_crashed(#amqqueue{ pid = QPid } = Q) ->
+    ok = rpc:call(node(QPid), ?MODULE, delete_crashed_internal, [Q]).
 
-deliver(Qs, Delivery) -> deliver(Qs, Delivery, noflow).
+delete_crashed_internal(Q = #amqqueue{ name = QName }) ->
+    {ok, BQ} = application:get_env(rabbit, backing_queue_module),
+    BQ:delete_crashed(Q),
+    ok = internal_delete(QName).
 
-deliver_flow(Qs, Delivery) -> deliver(Qs, Delivery, flow).
+purge(#amqqueue{ pid = QPid }) -> delegate:call(QPid, purge).
 
 requeue(QPid, MsgIds, ChPid) -> delegate:call(QPid, {requeue, MsgIds, ChPid}).
 
@@ -612,7 +681,7 @@ notify_sent_queue_down(QPid) ->
 
 resume(QPid, ChPid) -> delegate:cast(QPid, {resume, ChPid}).
 
-internal_delete1(QueueName) ->
+internal_delete1(QueueName, OnlyDurable) ->
     ok = mnesia:delete({rabbit_queue, QueueName}),
     %% this 'guarded' delete prevents unnecessary writes to the mnesia
     %% disk log
@@ -622,7 +691,7 @@ internal_delete1(QueueName) ->
     end,
     %% we want to execute some things, as decided by rabbit_exchange,
     %% after the transaction.
-    rabbit_binding:remove_for_destination(QueueName).
+    rabbit_binding:remove_for_destination(QueueName, OnlyDurable).
 
 internal_delete(QueueName) ->
     rabbit_misc:execute_mnesia_tx_with_tail(
@@ -632,7 +701,7 @@ internal_delete(QueueName) ->
                   {[], []} ->
                       rabbit_misc:const({error, not_found});
                   _ ->
-                      Deletions = internal_delete1(QueueName),
+                      Deletions = internal_delete1(QueueName, false),
                       T = rabbit_binding:process_deletions(Deletions),
                       fun() ->
                               ok = T(),
@@ -650,15 +719,49 @@ forget_all_durable(Node) ->
           fun () ->
                   Qs = mnesia:match_object(rabbit_durable_queue,
                                            #amqqueue{_ = '_'}, write),
-                  [rabbit_binding:process_deletions(
-                     internal_delete1(Name)) ||
-                      #amqqueue{name = Name, pid = Pid} = Q <- Qs,
-                      node(Pid) =:= Node,
-                      rabbit_policy:get(<<"ha-mode">>, Q) =:= undefined],
+                  [forget_node_for_queue(Node, Q) ||
+                      #amqqueue{pid = Pid} = Q <- Qs,
+                      node(Pid) =:= Node],
                   ok
           end),
     ok.
 
+%% Try to promote a slave while down - it should recover as a
+%% master. We try to take the oldest slave here for best chance of
+%% recovery.
+forget_node_for_queue(DeadNode, Q = #amqqueue{recoverable_slaves = RS}) ->
+    forget_node_for_queue(DeadNode, RS, Q).
+
+forget_node_for_queue(_DeadNode, [], #amqqueue{name = Name}) ->
+    %% No slaves to recover from, queue is gone.
+    %% Don't process_deletions since that just calls callbacks and we
+    %% are not really up.
+    internal_delete1(Name, true);
+
+%% Should not happen, but let's be conservative.
+forget_node_for_queue(DeadNode, [DeadNode | T], Q) ->
+    forget_node_for_queue(DeadNode, T, Q);
+
+forget_node_for_queue(DeadNode, [H|T], Q) ->
+    case node_permits_offline_promotion(H) of
+        false -> forget_node_for_queue(DeadNode, T, Q);
+        true  -> Q1 = Q#amqqueue{pid = rabbit_misc:node_to_fake_pid(H)},
+                 ok = mnesia:write(rabbit_durable_queue, Q1, write)
+    end.
+
+node_permits_offline_promotion(Node) ->
+    case node() of
+        Node -> not rabbit:is_running(); %% [1]
+        _    -> Running = rabbit_mnesia:cluster_nodes(running),
+                not lists:member(Node, Running) %% [2]
+    end.
+%% [1] In this case if we are a real running node (i.e. rabbitmqctl
+%% has RPCed into us) then we cannot allow promotion. If on the other
+%% hand we *are* rabbitmqctl impersonating the node for offline
+%% node-forgetting then we can.
+%%
+%% [2] This is simpler; as long as it's down that's OK
+
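
A worked illustration of the promotion-while-down path (node names invented):
a durable mirrored queue whose master lived on the dead node 'rabbit@b' and
which recorded recoverable_slaves = ['rabbit@c', 'rabbit@d'] has its durable
record rewritten so 'rabbit@c' can recover it as master, i.e.
forget_node_for_queue/3 ends up doing:

    Q1 = Q#amqqueue{pid = rabbit_misc:node_to_fake_pid('rabbit@c')},
    ok = mnesia:write(rabbit_durable_queue, Q1, write).

This only happens because node_permits_offline_promotion('rabbit@c') returns
true while 'rabbit@c' is not running in the cluster.
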
 run_backing_queue(QPid, Mod, Fun) ->
     gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}).
 
@@ -674,6 +777,46 @@ stop_mirroring(QPid)  -> ok = delegate:cast(QPid, stop_mirroring).
 sync_mirrors(QPid)        -> delegate:call(QPid, sync_mirrors).
 cancel_sync_mirrors(QPid) -> delegate:call(QPid, cancel_sync_mirrors).
 
+on_node_up(Node) ->
+    ok = rabbit_misc:execute_mnesia_transaction(
+           fun () ->
+                   Qs = mnesia:match_object(rabbit_queue,
+                                            #amqqueue{_ = '_'}, write),
+                   [maybe_clear_recoverable_node(Node, Q) || Q <- Qs],
+                   ok
+           end).
+
+maybe_clear_recoverable_node(Node,
+                             #amqqueue{sync_slave_pids    = SPids,
+                                       recoverable_slaves = RSs} = Q) ->
+    case lists:member(Node, RSs) of
+        true  ->
+            %% There is a race between
+            %% rabbit_mirror_queue_slave:record_synchronised/1, called by the
+            %% incoming slave node, and this function, called by the master
+            %% node. If this function runs after record_synchronised/1, the
+            %% node is erroneously removed from the recoverable slaves list.
+            %%
+            %% We therefore check whether the slave node's queue PID is
+            %% alive. If it is, this function ran after record_synchronised/1
+            %% and we leave the queue record alone; it is already correct.
+            DoClearNode =
+                case [SP || SP <- SPids, node(SP) =:= Node] of
+                    [SPid] -> not rabbit_misc:is_process_alive(SPid);
+                    _      -> true
+                end,
+            if
+                DoClearNode -> RSs1 = RSs -- [Node],
+                               store_queue(
+                                 Q#amqqueue{recoverable_slaves = RSs1});
+                true        -> ok
+            end;
+        false ->
+            ok
+    end.
+
 on_node_down(Node) ->
     rabbit_misc:execute_mnesia_tx_with_tail(
       fun () -> QsDels =
@@ -682,7 +825,7 @@ on_node_down(Node) ->
                                               slave_pids = []}
                                         <- mnesia:table(rabbit_queue),
                                     node(Pid) == Node andalso
-                                    not rabbit_misc:is_process_alive(Pid)])),
+                                    not rabbit_mnesia:is_process_alive(Pid)])),
                 {Qs, Dels} = lists:unzip(QsDels),
                 T = rabbit_binding:process_deletions(
                       lists:foldl(fun rabbit_binding:combine_deletions/2,
@@ -709,15 +852,29 @@ pseudo_queue(QueueName, Pid) ->
               pid          = Pid,
               slave_pids   = []}.
 
-deliver([], _Delivery, _Flow) ->
+immutable(Q) -> Q#amqqueue{pid                = none,
+                           slave_pids         = none,
+                           sync_slave_pids    = none,
+                           recoverable_slaves = none,
+                           gm_pids            = none,
+                           policy             = none,
+                           decorators         = none,
+                           state              = none}.
+
+deliver([], _Delivery) ->
     %% /dev/null optimisation
     [];
 
-deliver(Qs, Delivery, Flow) ->
+deliver(Qs, Delivery = #delivery{flow = Flow}) ->
     {MPids, SPids} = qpids(Qs),
     QPids = MPids ++ SPids,
+    %% We use up two credits to send to a slave since the message
+    %% arrives at the slave from two directions. We will ack one when
+    %% the slave receives the message direct from the channel, and the
+    %% other when it receives it via GM.
     case Flow of
-        flow   -> [credit_flow:send(QPid) || QPid <- QPids];
+        flow   -> [credit_flow:send(QPid) || QPid <- QPids],
+                  [credit_flow:send(QPid) || QPid <- SPids];
         noflow -> ok
     end,
 
@@ -726,8 +883,8 @@ deliver(Qs, Delivery, Flow) ->
     %% after they have become master they should mark the message as
     %% 'delivered' since they do not know what the master may have
     %% done with it.
-    MMsg = {deliver, Delivery, false, Flow},
-    SMsg = {deliver, Delivery, true,  Flow},
+    MMsg = {deliver, Delivery, false},
+    SMsg = {deliver, Delivery, true},
     delegate:cast(MPids, MMsg),
     delegate:cast(SPids, SMsg),
     QPids.
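
Since the flow mode now travels inside #delivery{} rather than as a separate
argument, callers of deliver/2 choose flow control per delivery. A hedged
sketch (Delivery0 is assumed to be built elsewhere, e.g. by the channel, and
the #delivery{} record comes from rabbit.hrl):

    Qs       = rabbit_amqqueue:lookup(QNames),       %% QNames :: [name()]
    Delivery = Delivery0#delivery{flow = flow},      %% or 'noflow'
    QPids    = rabbit_amqqueue:deliver(Qs, Delivery).
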
index 9b785303a406875dc1e63c92ff9436cf12792510..c5e4206fe36e5203be2765746cb97e359fd9e198 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_amqqueue_process).
@@ -24,7 +24,7 @@
 -define(RAM_DURATION_UPDATE_INTERVAL, 5000).
 -define(CONSUMER_BIAS_RATIO,           1.1). %% i.e. consume 10% faster
 
--export([start_link/1, info_keys/0]).
+-export([info_keys/0]).
 
 -export([init_with_backing_queue_state/7]).
 
@@ -52,6 +52,7 @@
             dlx,
             dlx_routing_key,
             max_length,
+            max_bytes,
             args_policy_version,
             status
            }).
 
 -ifdef(use_specs).
 
--spec(start_link/1 ::
-        (rabbit_types:amqqueue()) -> rabbit_types:ok_pid_or_error()).
 -spec(info_keys/0 :: () -> rabbit_types:info_keys()).
 -spec(init_with_backing_queue_state/7 ::
         (rabbit_types:amqqueue(), atom(), tuple(), any(),
-         [rabbit_types:delivery()], pmon:pmon(), dict()) -> #q{}).
+         [rabbit_types:delivery()], pmon:pmon(), dict:dict()) -> #q{}).
 
 -endif.
 
@@ -84,7 +83,7 @@
          memory,
          slave_pids,
          synchronised_slave_pids,
-         backing_queue_status,
+         recoverable_slaves,
          state
         ]).
 
 
 %%----------------------------------------------------------------------------
 
-start_link(Q) -> gen_server2:start_link(?MODULE, Q, []).
-
-info_keys() -> ?INFO_KEYS.
+info_keys()       -> ?INFO_KEYS       ++ rabbit_backing_queue:info_keys().
+statistics_keys() -> ?STATISTICS_KEYS ++ rabbit_backing_queue:info_keys().
 
 %%----------------------------------------------------------------------------
 
@@ -110,26 +108,8 @@ init(Q) ->
     process_flag(trap_exit, true),
     ?store_proc_name(Q#amqqueue.name),
     {ok, init_state(Q#amqqueue{pid = self()}), hibernate,
-     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS,
-                              RateTRef, Deliveries, Senders, MTC) ->
-    case Owner of
-        none -> ok;
-        _    -> erlang:monitor(process, Owner)
-    end,
-    State = init_state(Q),
-    State1 = State#q{backing_queue       = BQ,
-                     backing_queue_state = BQS,
-                     rate_timer_ref      = RateTRef,
-                     senders             = Senders,
-                     msg_id_to_channel   = MTC},
-    State2 = process_args_policy(State1),
-    State3 = lists:foldl(fun (Delivery, StateN) ->
-                                 deliver_or_enqueue(Delivery, true, StateN)
-                         end, State2, Deliveries),
-    notify_decorators(startup, State3),
-    State3.
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE},
+    ?MODULE}.
 
 init_state(Q) ->
     State = #q{q                   = Q,
@@ -142,44 +122,37 @@ init_state(Q) ->
                args_policy_version = 0},
     rabbit_event:init_stats_timer(State, #q.stats_timer).
 
-terminate(shutdown = R,      State = #q{backing_queue = BQ}) ->
-    terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
-terminate({shutdown, missing_owner} = Reason, State) ->
-    %% if the owner was missing then there will be no queue, so don't emit stats
-    terminate_shutdown(terminate_delete(false, Reason, State), State);
-terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) ->
-    terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
-terminate(Reason,            State) ->
-    terminate_shutdown(terminate_delete(true, Reason, State), State).
+init_it(Recover, From, State = #q{q = #amqqueue{exclusive_owner = none}}) ->
+    init_it2(Recover, From, State);
 
-terminate_delete(EmitStats, Reason,
-                 State = #q{q = #amqqueue{name          = QName},
-                                          backing_queue = BQ}) ->
-    fun (BQS) ->
-        BQS1 = BQ:delete_and_terminate(Reason, BQS),
-        if EmitStats -> rabbit_event:if_enabled(State, #q.stats_timer,
-                                                fun() -> emit_stats(State) end);
-           true      -> ok
-        end,
-        %% don't care if the internal delete doesn't return 'ok'.
-        rabbit_amqqueue:internal_delete(QName),
-        BQS1
+%% You used to be able to declare an exclusive durable queue. Sadly we still
+%% need to tidy up after that case; there could be remnants of one left over
+%% from an upgrade. That is why we don't enforce Recover = new here.
+init_it(Recover, From, State = #q{q = #amqqueue{exclusive_owner = Owner}}) ->
+    case rabbit_misc:is_process_alive(Owner) of
+        true  -> erlang:monitor(process, Owner),
+                 init_it2(Recover, From, State);
+        false -> #q{backing_queue       = undefined,
+                    backing_queue_state = undefined,
+                    q                   = Q} = State,
+                 send_reply(From, {owner_died, Q}),
+                 BQ = backing_queue_module(Q),
+                 {_, Terms} = recovery_status(Recover),
+                 BQS = bq_init(BQ, Q, Terms),
+                 %% Rely on terminate to delete the queue.
+                 {stop, {shutdown, missing_owner},
+                  State#q{backing_queue = BQ, backing_queue_state = BQS}}
     end.
 
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-%%----------------------------------------------------------------------------
-
-declare(Recover, From, State = #q{q                   = Q,
-                                  backing_queue       = undefined,
-                                  backing_queue_state = undefined}) ->
-    {Recovery, TermsOrNew} = recovery_status(Recover),
-    case rabbit_amqqueue:internal_declare(Q, Recovery /= new) of
+init_it2(Recover, From, State = #q{q                   = Q,
+                                   backing_queue       = undefined,
+                                   backing_queue_state = undefined}) ->
+    {Barrier, TermsOrNew} = recovery_status(Recover),
+    case rabbit_amqqueue:internal_declare(Q, Recover /= new) of
         #amqqueue{} = Q1 ->
-            case matches(Recovery, Q, Q1) of
+            case matches(Recover, Q, Q1) of
                 true ->
-                    gen_server2:reply(From, {new, Q}),
                     ok = file_handle_cache:register_callback(
                            rabbit_amqqueue, set_maximum_since_use, [self()]),
                     ok = rabbit_memory_monitor:register(
@@ -187,7 +160,8 @@ declare(Recover, From, State = #q{q                   = Q,
                                     set_ram_duration_target, [self()]}),
                     BQ = backing_queue_module(Q1),
                     BQS = bq_init(BQ, Q, TermsOrNew),
-                    recovery_barrier(Recovery),
+                    send_reply(From, {new, Q}),
+                    recovery_barrier(Barrier),
                     State1 = process_args_policy(
                                State#q{backing_queue       = BQ,
                                        backing_queue_state = BQS}),
@@ -204,8 +178,11 @@ declare(Recover, From, State = #q{q                   = Q,
             {stop, normal, Err, State}
     end.
 
-recovery_status(new)              -> {new,     new};
-recovery_status({Recover, Terms}) -> {Recover, Terms}.
+recovery_status(new)              -> {no_barrier, new};
+recovery_status({Recover, Terms}) -> {Recover,    Terms}.
+
+send_reply(none, _Q) -> ok;
+send_reply(From, Q)  -> gen_server2:reply(From, Q).
 
 matches(new, Q1, Q2) ->
     %% i.e. not policy
@@ -219,6 +196,91 @@ matches(new, Q1, Q2) ->
 matches(_,  Q,   Q) -> true;
 matches(_, _Q, _Q1) -> false.
 
+recovery_barrier(no_barrier) ->
+    ok;
+recovery_barrier(BarrierPid) ->
+    MRef = erlang:monitor(process, BarrierPid),
+    receive
+        {BarrierPid, go}              -> erlang:demonitor(MRef, [flush]);
+        {'DOWN', MRef, process, _, _} -> ok
+    end.
+
+init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS,
+                              RateTRef, Deliveries, Senders, MTC) ->
+    case Owner of
+        none -> ok;
+        _    -> erlang:monitor(process, Owner)
+    end,
+    State = init_state(Q),
+    State1 = State#q{backing_queue       = BQ,
+                     backing_queue_state = BQS,
+                     rate_timer_ref      = RateTRef,
+                     senders             = Senders,
+                     msg_id_to_channel   = MTC},
+    State2 = process_args_policy(State1),
+    State3 = lists:foldl(fun (Delivery, StateN) ->
+                                 deliver_or_enqueue(Delivery, true, StateN)
+                         end, State2, Deliveries),
+    notify_decorators(startup, State3),
+    State3.
+
+terminate(shutdown = R,      State = #q{backing_queue = BQ}) ->
+    terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
+terminate({shutdown, missing_owner} = Reason, State) ->
+    %% if the owner was missing then there will be no queue, so don't emit stats
+    terminate_shutdown(terminate_delete(false, Reason, State), State);
+terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) ->
+    terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
+terminate(normal,            State) -> %% delete case
+    terminate_shutdown(terminate_delete(true, normal, State), State);
+%% If we crashed don't try to clean up the BQS, probably best to leave it.
+terminate(_Reason,           State = #q{q = Q}) ->
+    terminate_shutdown(fun (BQS) ->
+                               Q2 = Q#amqqueue{state = crashed},
+                               rabbit_misc:execute_mnesia_transaction(
+                                 fun() ->
+                                         rabbit_amqqueue:store_queue(Q2)
+                                 end),
+                               BQS
+                       end, State).
+
+terminate_delete(EmitStats, Reason,
+                 State = #q{q = #amqqueue{name          = QName},
+                                          backing_queue = BQ}) ->
+    fun (BQS) ->
+        BQS1 = BQ:delete_and_terminate(Reason, BQS),
+        if EmitStats -> rabbit_event:if_enabled(State, #q.stats_timer,
+                                                fun() -> emit_stats(State) end);
+           true      -> ok
+        end,
+        %% don't care if the internal delete doesn't return 'ok'.
+        rabbit_amqqueue:internal_delete(QName),
+        BQS1
+    end.
+
+terminate_shutdown(Fun, State) ->
+    State1 = #q{backing_queue_state = BQS, consumers = Consumers} =
+        lists:foldl(fun (F, S) -> F(S) end, State,
+                    [fun stop_sync_timer/1,
+                     fun stop_rate_timer/1,
+                     fun stop_expiry_timer/1,
+                     fun stop_ttl_timer/1]),
+    case BQS of
+        undefined -> State1;
+        _         -> ok = rabbit_memory_monitor:deregister(self()),
+                     QName = qname(State),
+                     notify_decorators(shutdown, State),
+                     [emit_consumer_deleted(Ch, CTag, QName) ||
+                         {Ch, CTag, _, _, _} <-
+                             rabbit_queue_consumers:all(Consumers)],
+                     State1#q{backing_queue_state = Fun(BQS)}
+    end.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%----------------------------------------------------------------------------
+
 maybe_notify_decorators(false, State) -> State;
 maybe_notify_decorators(true,  State) -> notify_decorators(State), State.
 
@@ -247,15 +309,6 @@ bq_init(BQ, Q, Recover) ->
                     rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
             end).
 
-recovery_barrier(new) ->
-    ok;
-recovery_barrier(BarrierPid) ->
-    MRef = erlang:monitor(process, BarrierPid),
-    receive
-        {BarrierPid, go}              -> erlang:demonitor(MRef, [flush]);
-        {'DOWN', MRef, process, _, _} -> ok
-    end.
-
 process_args_policy(State = #q{q                   = Q,
                                args_policy_version = N}) ->
       ArgsTable =
@@ -263,7 +316,8 @@ process_args_policy(State = #q{q                   = Q,
          {<<"dead-letter-exchange">>,    fun res_arg/2, fun init_dlx/2},
          {<<"dead-letter-routing-key">>, fun res_arg/2, fun init_dlx_rkey/2},
          {<<"message-ttl">>,             fun res_min/2, fun init_ttl/2},
-         {<<"max-length">>,              fun res_min/2, fun init_max_length/2}],
+         {<<"max-length">>,              fun res_min/2, fun init_max_length/2},
+         {<<"max-length-bytes">>,        fun res_min/2, fun init_max_bytes/2}],
       drop_expired_msgs(
          lists:foldl(fun({Name, Resolve, Fun}, StateN) ->
                              Fun(args_policy_lookup(Name, Resolve, Q), StateN)
@@ -302,23 +356,9 @@ init_max_length(MaxLen, State) ->
     {_Dropped, State1} = maybe_drop_head(State#q{max_length = MaxLen}),
     State1.
 
-terminate_shutdown(Fun, State) ->
-    State1 = #q{backing_queue_state = BQS, consumers = Consumers} =
-        lists:foldl(fun (F, S) -> F(S) end, State,
-                    [fun stop_sync_timer/1,
-                     fun stop_rate_timer/1,
-                     fun stop_expiry_timer/1,
-                     fun stop_ttl_timer/1]),
-    case BQS of
-        undefined -> State1;
-        _         -> ok = rabbit_memory_monitor:deregister(self()),
-                     QName = qname(State),
-                     notify_decorators(shutdown, State),
-                     [emit_consumer_deleted(Ch, CTag, QName) ||
-                         {Ch, CTag, _, _, _} <-
-                             rabbit_queue_consumers:all(Consumers)],
-                     State1#q{backing_queue_state = Fun(BQS)}
-    end.
+init_max_bytes(MaxBytes, State) ->
+    {_Dropped, State1} = maybe_drop_head(State#q{max_bytes = MaxBytes}),
+    State1.
 
 reply(Reply, NewState) ->
     {NewState1, Timeout} = next_state(NewState),
@@ -385,15 +425,13 @@ ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref       = undefined,
                  V when V > 0 -> V + 999; %% always fire later
                  _            -> 0
              end) div 1000,
-    TRef = erlang:send_after(After, self(), {drop_expired, Version}),
+    TRef = rabbit_misc:send_after(After, self(), {drop_expired, Version}),
     State#q{ttl_timer_ref = TRef, ttl_timer_expiry = Expiry};
 ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref    = TRef,
                                     ttl_timer_expiry = TExpiry})
   when Expiry + 1000 < TExpiry ->
-    case erlang:cancel_timer(TRef) of
-        false -> State;
-        _     -> ensure_ttl_timer(Expiry, State#q{ttl_timer_ref = undefined})
-    end;
+    rabbit_misc:cancel_timer(TRef),
+    ensure_ttl_timer(Expiry, State#q{ttl_timer_ref = undefined});
 ensure_ttl_timer(_Expiry, State) ->
     State.
 
@@ -460,12 +498,13 @@ send_mandatory(#delivery{mandatory  = true,
 
 discard(#delivery{confirm = Confirm,
                   sender  = SenderPid,
+                  flow    = Flow,
                   message = #basic_message{id = MsgId}}, BQ, BQS, MTC) ->
     MTC1 = case Confirm of
                true  -> confirm_messages([MsgId], MTC);
                false -> MTC
            end,
-    BQS1 = BQ:discard(MsgId, SenderPid, BQS),
+    BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS),
     {BQS1, MTC1}.
 
 run_message_queue(State) -> run_message_queue(false, State).
@@ -487,14 +526,17 @@ run_message_queue(ActiveConsumersChanged, State) ->
                  end
     end.
 
-attempt_delivery(Delivery = #delivery{sender = SenderPid, message = Message},
+attempt_delivery(Delivery = #delivery{sender  = SenderPid,
+                                      flow    = Flow,
+                                      message = Message},
                  Props, Delivered, State = #q{backing_queue       = BQ,
                                               backing_queue_state = BQS,
                                               msg_id_to_channel   = MTC}) ->
     case rabbit_queue_consumers:deliver(
            fun (true)  -> true = BQ:is_empty(BQS),
-                          {AckTag, BQS1} = BQ:publish_delivered(
-                                             Message, Props, SenderPid, BQS),
+                          {AckTag, BQS1} =
+                              BQ:publish_delivered(
+                                Message, Props, SenderPid, Flow, BQS),
                           {{Message, Delivered, AckTag}, {BQS1, MTC}};
                (false) -> {{Message, Delivered, undefined},
                            discard(Delivery, BQ, BQS, MTC)}
@@ -511,7 +553,9 @@ attempt_delivery(Delivery = #delivery{sender = SenderPid, message = Message},
                             State#q{consumers = Consumers})}
     end.
 
-deliver_or_enqueue(Delivery = #delivery{message = Message, sender = SenderPid},
+deliver_or_enqueue(Delivery = #delivery{message = Message,
+                                        sender  = SenderPid,
+                                        flow    = Flow},
                    Delivered, State = #q{backing_queue       = BQ,
                                          backing_queue_state = BQS}) ->
     send_mandatory(Delivery), %% must do this before confirms
@@ -532,7 +576,7 @@ deliver_or_enqueue(Delivery = #delivery{message = Message, sender = SenderPid},
             {BQS3, MTC1} = discard(Delivery, BQ, BQS2, MTC),
             State3#q{backing_queue_state = BQS3, msg_id_to_channel = MTC1};
         {undelivered, State3 = #q{backing_queue_state = BQS2}} ->
-            BQS3 = BQ:publish(Message, Props, Delivered, SenderPid, BQS2),
+            BQS3 = BQ:publish(Message, Props, Delivered, SenderPid, Flow, BQS2),
             {Dropped, State4 = #q{backing_queue_state = BQS4}} =
                 maybe_drop_head(State3#q{backing_queue_state = BQS3}),
             QLen = BQ:len(BQS4),
@@ -543,34 +587,41 @@ deliver_or_enqueue(Delivery = #delivery{message = Message, sender = SenderPid},
             %% remains unchanged, or if the newly published message
             %% has no expiry and becomes the head of the queue then
             %% the call is unnecessary.
-            case {Dropped > 0, QLen =:= 1, Props#message_properties.expiry} of
+            case {Dropped, QLen =:= 1, Props#message_properties.expiry} of
                 {false, false,         _} -> State4;
                 {true,  true,  undefined} -> State4;
                 {_,     _,             _} -> drop_expired_msgs(State4)
             end
     end.
 
-maybe_drop_head(State = #q{max_length = undefined}) ->
-    {0, State};
-maybe_drop_head(State = #q{max_length          = MaxLen,
-                           backing_queue       = BQ,
-                           backing_queue_state = BQS}) ->
-    case BQ:len(BQS) - MaxLen of
-        Excess when Excess > 0 ->
-            {Excess,
-             with_dlx(
-               State#q.dlx,
-               fun (X) -> dead_letter_maxlen_msgs(X, Excess, State) end,
-               fun () ->
-                       {_, BQS1} = lists:foldl(fun (_, {_, BQS0}) ->
-                                                       BQ:drop(false, BQS0)
-                                               end, {ok, BQS},
-                                               lists:seq(1, Excess)),
-                       State#q{backing_queue_state = BQS1}
-               end)};
-        _ -> {0, State}
+maybe_drop_head(State = #q{max_length = undefined,
+                           max_bytes  = undefined}) ->
+    {false, State};
+maybe_drop_head(State) ->
+    maybe_drop_head(false, State).
+
+maybe_drop_head(AlreadyDropped, State = #q{backing_queue       = BQ,
+                                           backing_queue_state = BQS}) ->
+    case over_max_length(State) of
+        true ->
+            maybe_drop_head(true,
+                            with_dlx(
+                              State#q.dlx,
+                              fun (X) -> dead_letter_maxlen_msg(X, State) end,
+                              fun () ->
+                                      {_, BQS1} = BQ:drop(false, BQS),
+                                      State#q{backing_queue_state = BQS1}
+                              end));
+        false ->
+            {AlreadyDropped, State}
     end.
 
+over_max_length(#q{max_length          = MaxLen,
+                   max_bytes           = MaxBytes,
+                   backing_queue       = BQ,
+                   backing_queue_state = BQS}) ->
+    BQ:len(BQS) > MaxLen orelse BQ:info(message_bytes_ready, BQS) > MaxBytes.
+
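
One detail worth noting in over_max_length/1: an unset limit is the atom
'undefined', and in Erlang's term order any number sorts before any atom, so a
missing limit can never trigger a drop. Quick shell illustration:

    1> 5 > undefined.
    false
    2> 1048577 > 1048576.
    true
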
 requeue_and_run(AckTags, State = #q{backing_queue       = BQ,
                                     backing_queue_state = BQS}) ->
     WasEmpty = BQ:is_empty(BQS),
@@ -664,9 +715,12 @@ subtract_acks(ChPid, AckTags, State = #q{consumers = Consumers}, Fun) ->
                                    run_message_queue(true, Fun(State1))
     end.
 
-message_properties(Message, Confirm, #q{ttl = TTL}) ->
+message_properties(Message = #basic_message{content = Content},
+                   Confirm, #q{ttl = TTL}) ->
+    #content{payload_fragments_rev = PFR} = Content,
     #message_properties{expiry           = calculate_msg_expiry(Message, TTL),
-                        needs_confirming = Confirm == eventually}.
+                        needs_confirming = Confirm == eventually,
+                        size             = iolist_size(PFR)}.
 
 calculate_msg_expiry(#basic_message{content = Content}, TTL) ->
     #content{properties = Props} =
@@ -723,15 +777,12 @@ dead_letter_rejected_msgs(AckTags, X,  State = #q{backing_queue = BQ}) ->
           end, rejected, X, State),
     State1.
 
-dead_letter_maxlen_msgs(X, Excess, State = #q{backing_queue = BQ}) ->
+dead_letter_maxlen_msg(X, State = #q{backing_queue = BQ}) ->
     {ok, State1} =
         dead_letter_msgs(
           fun (DLFun, Acc, BQS) ->
-                  lists:foldl(fun (_, {ok, Acc0, BQS0}) ->
-                                      {{Msg, _, AckTag}, BQS1} =
-                                        BQ:fetch(true, BQS0),
-                                      {ok, DLFun(Msg, AckTag, Acc0), BQS1}
-                              end, {ok, Acc, BQS}, lists:seq(1, Excess))
+                  {{Msg, _, AckTag}, BQS1} = BQ:fetch(true, BQS),
+                  {ok, DLFun(Msg, AckTag, Acc), BQS1}
           end, maxlen, X, State),
     State1.
 
@@ -810,19 +861,25 @@ i(synchronised_slave_pids, #q{q = #amqqueue{name = Name}}) ->
         false -> '';
         true  -> SSPids
     end;
+i(recoverable_slaves, #q{q = #amqqueue{name    = Name,
+                                       durable = Durable}}) ->
+    {ok, Q = #amqqueue{recoverable_slaves = Nodes}} =
+        rabbit_amqqueue:lookup(Name),
+    case Durable andalso rabbit_mirror_queue_misc:is_mirrored(Q) of
+        false -> '';
+        true  -> Nodes
+    end;
 i(state, #q{status = running}) -> credit_flow:state();
 i(state, #q{status = State})   -> State;
-i(backing_queue_status, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
-    BQ:status(BQS);
-i(Item, _) ->
-    throw({bad_argument, Item}).
+i(Item, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+    BQ:info(Item, BQS).
 
 emit_stats(State) ->
     emit_stats(State, []).
 
 emit_stats(State, Extra) ->
     ExtraKs = [K || {K, _} <- Extra],
-    Infos = [{K, V} || {K, V} <- infos(?STATISTICS_KEYS, State),
+    Infos = [{K, V} || {K, V} <- infos(statistics_keys(), State),
                        not lists:member(K, ExtraKs)],
     rabbit_event:notify(queue_stats, Extra ++ Infos).
 
@@ -896,33 +953,11 @@ prioritise_info(Msg, _Len, #q{q = #amqqueue{exclusive_owner = DownPid}}) ->
         _                                    -> 0
     end.
 
-handle_call({init, Recover}, From,
-            State = #q{q = #amqqueue{exclusive_owner = none}}) ->
-    declare(Recover, From, State);
-
-%% You used to be able to declare an exclusive durable queue. Sadly we
-%% need to still tidy up after that case, there could be the remnants
-%% of one left over from an upgrade. So that's why we don't enforce
-%% Recover = new here.
-handle_call({init, Recover}, From,
-            State = #q{q = #amqqueue{exclusive_owner = Owner}}) ->
-    case rabbit_misc:is_process_alive(Owner) of
-        true  -> erlang:monitor(process, Owner),
-                 declare(Recover, From, State);
-        false -> #q{backing_queue       = undefined,
-                    backing_queue_state = undefined,
-                    q                   = Q} = State,
-                 gen_server2:reply(From, {owner_died, Q}),
-                 BQ = backing_queue_module(Q),
-                 {_, Terms} = recovery_status(Recover),
-                 BQS = bq_init(BQ, Q, Terms),
-                 %% Rely on terminate to delete the queue.
-                 {stop, {shutdown, missing_owner},
-                  State#q{backing_queue = BQ, backing_queue_state = BQS}}
-    end;
+handle_call({init, Recover}, From, State) ->
+    init_it(Recover, From, State);
 
 handle_call(info, _From, State) ->
-    reply(infos(?INFO_KEYS, State), State);
+    reply(infos(info_keys(), State), State);
 
 handle_call({info, Items}, _From, State) ->
     try
@@ -1064,19 +1099,30 @@ handle_call(sync_mirrors, _From, State) ->
 handle_call(cancel_sync_mirrors, _From, State) ->
     reply({ok, not_syncing}, State).
 
+handle_cast(init, State) ->
+    init_it({no_barrier, non_clean_shutdown}, none, State);
+
 handle_cast({run_backing_queue, Mod, Fun},
             State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
     noreply(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)});
 
-handle_cast({deliver, Delivery = #delivery{sender = Sender}, Delivered, Flow},
+handle_cast({deliver, Delivery = #delivery{sender = Sender,
+                                           flow   = Flow}, SlaveWhenPublished},
             State = #q{senders = Senders}) ->
     Senders1 = case Flow of
                    flow   -> credit_flow:ack(Sender),
+                             case SlaveWhenPublished of
+                                 true  -> credit_flow:ack(Sender); %% [0]
+                                 false -> ok
+                             end,
                              pmon:monitor(Sender, Senders);
                    noflow -> Senders
                end,
     State1 = State#q{senders = Senders1},
-    noreply(deliver_or_enqueue(Delivery, Delivered, State1));
+    noreply(deliver_or_enqueue(Delivery, SlaveWhenPublished, State1));
+%% [0] The second ack is needed because the channel thought we were a
+%% slave at the time it published this message, so it used two credits
+%% (see rabbit_amqqueue:deliver/2).
 
 handle_cast({ack, AckTags, ChPid}, State) ->
     noreply(ack(AckTags, ChPid, State));
@@ -1165,7 +1211,7 @@ handle_cast({force_event_refresh, Ref},
                       emit_consumer_created(
                         Ch, CTag, true, AckRequired, QName, Prefetch, Args, Ref)
     end,
-    noreply(State);
+    noreply(rabbit_event:init_stats_timer(State, #q.stats_timer));
 
 handle_cast(notify_decorators, State) ->
     notify_decorators(State),
index 0fd64c26710314e12db2eb87c7a58eef1637aa6b..f05ff430cbab99d12418d3d4cde8cc1aec435b40 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_amqqueue_sup).
 
 -behaviour(supervisor2).
 
--export([start_link/0, start_child/2]).
+-export([start_link/2]).
 
 -export([init/1]).
 
 -include("rabbit.hrl").
 
--define(SERVER, ?MODULE).
-
 %%----------------------------------------------------------------------------
 
 -ifdef(use_specs).
 
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_child/2 ::
-        (node(), [any()]) -> rabbit_types:ok(pid() | undefined) |
-                             rabbit_types:ok({pid(), any()}) |
-                             rabbit_types:error(any())).
+-spec(start_link/2 :: (rabbit_types:amqqueue(), rabbit_prequeue:start_mode()) ->
+                           {'ok', pid(), pid()}).
 
 -endif.
 
 %%----------------------------------------------------------------------------
 
-start_link() ->
-    supervisor2:start_link({local, ?SERVER}, ?MODULE, []).
-
-start_child(Node, Args) ->
-    supervisor2:start_child({?SERVER, Node}, Args).
-
-init([]) ->
-    {ok, {{simple_one_for_one, 10, 10},
-          [{rabbit_amqqueue, {rabbit_amqqueue_process, start_link, []},
-            temporary, ?MAX_WAIT, worker, [rabbit_amqqueue_process]}]}}.
+start_link(Q, StartMode) ->
+    Marker = spawn_link(fun() -> receive stop -> ok end end),
+    ChildSpec = {rabbit_amqqueue,
+                 {rabbit_prequeue, start_link, [Q, StartMode, Marker]},
+                 intrinsic, ?MAX_WAIT, worker, [rabbit_amqqueue_process,
+                                                rabbit_mirror_queue_slave]},
+    {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+    {ok, QPid} = supervisor2:start_child(SupPid, ChildSpec),
+    unlink(Marker),
+    Marker ! stop,
+    {ok, SupPid, QPid}.
+
+init([]) -> {ok, {{one_for_one, 5, 10}, []}}.
diff --git a/rabbitmq-server/src/rabbit_amqqueue_sup_sup.erl b/rabbitmq-server/src/rabbit_amqqueue_sup_sup.erl
new file mode 100644 (file)
index 0000000..4c4bb52
--- /dev/null
@@ -0,0 +1,52 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_amqqueue_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_queue_process/3]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-spec(start_queue_process/3 :: (node(), rabbit_types:amqqueue(),
+                               'declare' | 'recovery' | 'slave') -> pid()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+    supervisor2:start_link({local, ?SERVER}, ?MODULE, []).
+
+start_queue_process(Node, Q, StartMode) ->
+    {ok, _SupPid, QPid} = supervisor2:start_child(
+                            {?SERVER, Node}, [Q, StartMode]),
+    QPid.
+
+init([]) ->
+    {ok, {{simple_one_for_one, 10, 10},
+          [{rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []},
+            temporary, ?MAX_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}.
index 863eb18e6aa16e6d22e3a4d3e6eca1bd644c1f44..ee6dabea1424d0c4e950bc0f25e33bf8220dcb79 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_backend_dummy).
 -include("rabbit.hrl").
 
--behaviour(rabbit_auth_backend).
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
 
--export([description/0]).
 -export([user/0]).
--export([check_user_login/2, check_vhost_access/2, check_resource_access/3]).
+-export([user_login_authentication/2, user_login_authorization/1,
+         check_vhost_access/3, check_resource_access/3]).
 
 -ifdef(use_specs).
 
 
 %% A user to be used by the direct client when permission checks are
 %% not needed. This user can do anything AMQPish.
-user() -> #user{username     = <<"dummy">>,
-                tags         = [],
-                auth_backend = ?MODULE,
-                impl         = none}.
+user() -> #user{username       = <<"none">>,
+                tags           = [],
+                authz_backends = [{?MODULE, none}]}.
 
 %% Implementation of rabbit_auth_backend
 
-description() ->
-    [{name, <<"Dummy">>},
-     {description, <<"Database for the dummy user">>}].
+user_login_authentication(_, _) ->
+    {refused, "cannot log in conventionally as dummy user", []}.
 
-check_user_login(_, _) ->
+user_login_authorization(_) ->
     {refused, "cannot log in conventionally as dummy user", []}.
 
-check_vhost_access(#user{}, _VHostPath) -> true.
-check_resource_access(#user{}, #resource{}, _Permission) -> true.
+check_vhost_access(#auth_user{}, _VHostPath, _Sock) -> true.
+check_resource_access(#auth_user{}, #resource{}, _Permission) -> true.
index fd1c4e8ee64139948dd42af906ca2a896e1f1d96..e53ce50c22dfea65f06f3ba37f8839cec4b8e491 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_backend_internal).
 -include("rabbit.hrl").
 
--behaviour(rabbit_auth_backend).
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
 
--export([description/0]).
--export([check_user_login/2, check_vhost_access/2, check_resource_access/3]).
+-export([user_login_authentication/2, user_login_authorization/1,
+         check_vhost_access/3, check_resource_access/3]).
 
 -export([add_user/2, delete_user/1, lookup_user/1,
          change_password/2, clear_password/1,
 %%----------------------------------------------------------------------------
 %% Implementation of rabbit_auth_backend
 
-description() ->
-    [{name, <<"Internal">>},
-     {description, <<"Internal user / password database">>}].
-
-check_user_login(Username, []) ->
+user_login_authentication(Username, []) ->
     internal_check_user_login(Username, fun(_) -> true end);
-check_user_login(Username, [{password, Cleartext}]) ->
+user_login_authentication(Username, [{password, Cleartext}]) ->
     internal_check_user_login(
       Username,
       fun (#internal_user{password_hash = <<Salt:4/binary, Hash/binary>>}) ->
@@ -90,25 +87,30 @@ check_user_login(Username, [{password, Cleartext}]) ->
           (#internal_user{}) ->
               false
       end);
-check_user_login(Username, AuthProps) ->
+user_login_authentication(Username, AuthProps) ->
     exit({unknown_auth_props, Username, AuthProps}).
 
+user_login_authorization(Username) ->
+    case user_login_authentication(Username, []) of
+        {ok, #auth_user{impl = Impl}} -> {ok, Impl};
+        Else                          -> Else
+    end.
+
 internal_check_user_login(Username, Fun) ->
     Refused = {refused, "user '~s' - invalid credentials", [Username]},
     case lookup_user(Username) of
         {ok, User = #internal_user{tags = Tags}} ->
             case Fun(User) of
-                true -> {ok, #user{username     = Username,
-                                   tags         = Tags,
-                                   auth_backend = ?MODULE,
-                                   impl         = User}};
+                true -> {ok, #auth_user{username = Username,
+                                        tags     = Tags,
+                                        impl     = none}};
                 _    -> Refused
             end;
         {error, not_found} ->
             Refused
     end.
 
-check_vhost_access(#user{username = Username}, VHostPath) ->
+check_vhost_access(#auth_user{username = Username}, VHostPath, _Sock) ->
     case mnesia:dirty_read({rabbit_user_permission,
                             #user_vhost{username     = Username,
                                         virtual_host = VHostPath}}) of
@@ -116,7 +118,7 @@ check_vhost_access(#user{username = Username}, VHostPath) ->
         [_R] -> true
     end.
 
-check_resource_access(#user{username = Username},
+check_resource_access(#auth_user{username = Username},
                       #resource{virtual_host = VHostPath, name = Name},
                       Permission) ->
     case mnesia:dirty_read({rabbit_user_permission,
index d11af09552c4c22c11690f6367e219112f593839..78e3e7dd4b7befb40d45d8471da7ee15b51c0c1d 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_mechanism).
 %%     Another round is needed. Here's the state I want next time.
 %% {protocol_error, Msg, Args}
 %%     Client got the protocol wrong. Log and die.
-%% {refused, Msg, Args}
+%% {refused, Username, Msg, Args}
 %%     Client failed authentication. Log and die.
 -callback handle_response(binary(), any()) ->
     {'ok', rabbit_types:user()} |
     {'challenge', binary(), any()} |
     {'protocol_error', string(), [any()]} |
-    {'refused', string(), [any()]}.
+    {'refused', rabbit_types:username() | none, string(), [any()]}.
 
 -else.
 
index e2183a99216a7e99c1e93ee0fed9db5303b5d592..621c575ebf1b2af802b4fd6acadce93efffaf9c5 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_mechanism_amqplain).
index b5751f414da64e18cb5aea7671ad2cfa47c92a7e..d9d7b11f15c624334984c1037fa757588d459e6e 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_mechanism_cr_demo).
index c008f6a7fa4ea640196b46db4fcc2e74eb64bb45..7a5f433abd694a630be24b7f4d4d654bf751dfb0 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_auth_mechanism_plain).
diff --git a/rabbitmq-server/src/rabbit_authn_backend.erl b/rabbitmq-server/src/rabbit_authn_backend.erl
new file mode 100644 (file)
index 0000000..b9cb0d3
--- /dev/null
@@ -0,0 +1,49 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_authn_backend).
+
+-include("rabbit.hrl").
+
+-ifdef(use_specs).
+
+%% Check a user can log in, given a username and a proplist of
+%% authentication information (e.g. [{password, Password}]). If your
+%% backend is not to be used for authentication, this should always
+%% refuse access.
+%%
+%% Possible responses:
+%% {ok, User}
+%%     Authentication succeeded, and here's the user record.
+%% {error, Error}
+%%     Something went wrong. Log and die.
+%% {refused, Msg, Args}
+%%     Client failed authentication. Log and die.
+-callback user_login_authentication(rabbit_types:username(), [term()]) ->
+    {'ok', rabbit_types:auth_user()} |
+    {'refused', string(), [any()]} |
+    {'error', any()}.
+
+-else.
+
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+    [{user_login_authentication, 2}];
+behaviour_info(_Other) ->
+    undefined.
+
+-endif.
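As a rough illustration of the new authentication behaviour (not part of the patch; the module name and the accepted credentials are invented), a minimal authn-only backend could look like this, returning the #auth_user{} record introduced by this change:

-module(my_authn_backend).   %% hypothetical example, not in the source tree

-include("rabbit.hrl").      %% for #auth_user{}

-behaviour(rabbit_authn_backend).

-export([user_login_authentication/2]).

user_login_authentication(Username, [{password, <<"letmein">>}]) ->
    {ok, #auth_user{username = Username, tags = [], impl = none}};
user_login_authentication(_Username, _AuthProps) ->
    {refused, "bad credentials", []}.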
similarity index 54%
rename from rabbitmq-server/src/rabbit_auth_backend.erl
rename to rabbitmq-server/src/rabbit_authz_backend.erl
index a7dd6494b1eedc5c6b2900aefe38045a71432859..12364b654b262c48f1c155ea6b4228a79142e2f1 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_auth_backend).
+-module(rabbit_authz_backend).
 
--ifdef(use_specs).
+-include("rabbit.hrl").
 
-%% A description proplist as with auth mechanisms,
-%% exchanges. Currently unused.
--callback description() -> [proplists:property()].
+-ifdef(use_specs).
 
-%% Check a user can log in, given a username and a proplist of
-%% authentication information (e.g. [{password, Password}]).
+%% Check a user can log in, when this backend is being used for
+%% authorisation only. Authentication has already taken place
+%% successfully, but we need to check that the user exists in this
+%% backend, and initialise any impl field we will want to have passed
+%% back in future calls to check_vhost_access/3 and
+%% check_resource_access/3.
 %%
 %% Possible responses:
-%% {ok, User}
-%%     Authentication succeeded, and here's the user record.
+%% {ok, Impl}
+%%     User authorisation succeeded, and here's the impl field.
 %% {error, Error}
 %%     Something went wrong. Log and die.
 %% {refused, Msg, Args}
-%%     Client failed authentication. Log and die.
--callback check_user_login(rabbit_types:username(), [term()]) ->
-    {'ok', rabbit_types:user()} |
+%%     User authorisation failed. Log and die.
+-callback user_login_authorization(rabbit_types:username()) ->
+    {'ok', any()} |
     {'refused', string(), [any()]} |
     {'error', any()}.
 
-%% Given #user and vhost, can a user log in to a vhost?
+%% Given #auth_user and vhost, can a user log in to a vhost?
 %% Possible responses:
 %% true
 %% false
 %% {error, Error}
 %%     Something went wrong. Log and die.
--callback check_vhost_access(rabbit_types:user(), rabbit_types:vhost()) ->
+-callback check_vhost_access(rabbit_types:auth_user(),
+                             rabbit_types:vhost(), rabbit_net:socket()) ->
     boolean() | {'error', any()}.
 
-
-%% Given #user, resource and permission, can a user access a resource?
+%% Given #auth_user, resource and permission, can a user access a resource?
 %%
 %% Possible responses:
 %% true
 %% false
 %% {error, Error}
 %%     Something went wrong. Log and die.
--callback check_resource_access(rabbit_types:user(),
+-callback check_resource_access(rabbit_types:auth_user(),
                                 rabbit_types:r(atom()),
                                 rabbit_access_control:permission_atom()) ->
     boolean() | {'error', any()}.
@@ -64,8 +66,8 @@
 -export([behaviour_info/1]).
 
 behaviour_info(callbacks) ->
-    [{description, 0}, {check_user_login, 2}, {check_vhost_access, 2},
-     {check_resource_access, 3}];
+    [{user_login_authorization, 1},
+     {check_vhost_access, 3}, {check_resource_access, 3}];
 behaviour_info(_Other) ->
     undefined.
 
index c5237d346dba2ea96d7ffcfdd72028c08c9fa216..cd53127e599fd5b00343ea3cedb738a5614431d7 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_autoheal).
 
--export([init/0, maybe_start/1, rabbit_down/2, node_down/2, handle_msg/3]).
+-export([init/0, enabled/0, maybe_start/1, rabbit_down/2, node_down/2,
+         handle_msg/3]).
 
 %% The named process we are running in.
 -define(SERVER, rabbit_node_monitor).
 
+-define(MNESIA_STOPPED_PING_INTERNAL, 200).
+
+-define(AUTOHEAL_STATE_AFTER_RESTART, rabbit_autoheal_state_after_restart).
+
 %%----------------------------------------------------------------------------
 
 %% In order to autoheal we want to:
 %% stops - if a node stops for any other reason it just gets a message
 %% it will ignore, and otherwise we carry on.
 %%
+%% Meanwhile, the leader may continue to receive new autoheal requests:
+%% all of them are ignored. The winner notifies the leader when the
+%% current autoheal process is finished (i.e. when all losers have stopped
+%% and been asked to start again) or was aborted. When the leader receives
+%% the notification, or if it loses contact with the winner, it can
+%% accept new autoheal requests.
+%%
 %% The winner and the leader are not necessarily the same node.
 %%
-%% Possible states:
+%% The leader can be a loser and will restart in this case. It remembers
+%% there is an autoheal in progress by temporarily saving the autoheal
+%% state to the application environment.
+%%
+%% == Possible states ==
 %%
 %% not_healing
 %%   - the default
 %%   - we are the winner and are waiting for all losing nodes to stop
 %%   before telling them they can restart
 %%
+%% {leader_waiting, Winner, Notify}
+%%   - we are the leader, and have already assigned the winner and losers.
+%%   We are waiting for a confirmation from the winner that the autoheal
+%%   process has ended. Meanwhile we can ignore autoheal requests.
+%%   Because we may be a loser too, this state is saved to the application
+%%   environment and restored on startup.
+%%
 %% restarting
 %%   - we are restarting. Of course the node monitor immediately dies
 %%   then so this state does not last long. We therefore send the
 %%   autoheal_safe_to_start message to the rabbit_outside_app_process
 %%   instead.
+%%
+%% == Message flow ==
+%%
+%% 1. Any node (leader included) >> {request_start, node()} >> Leader
+%%      When Mnesia detects it is running partitioned or
+%%      when a remote node starts, rabbit_node_monitor calls
+%%      rabbit_autoheal:maybe_start/1. The message above is sent to the
+%%      leader so the leader can take a decision.
+%%
+%% 2. Leader >> {become_winner, Losers} >> Winner
+%%      The leader notifies the winner so the latter can proceed with
+%%      the autoheal.
+%%
+%% 3. Winner >> {winner_is, Winner} >> All losers
+%%      The winner notifies losers they must stop.
+%%
+%% 4. Winner >> autoheal_safe_to_start >> All losers
+%%      When either all losers stopped or the autoheal process was
+%%      aborted, the winner notifies losers they can start again.
+%%
+%% 5. Leader >> report_autoheal_status >> Winner
+%%      The leader asks the winner for the autoheal status. This only
+%%      happens when the leader is a loser too. If this is not the case,
+%%      this message is never sent.
+%%
+%% 6. Winner >> {autoheal_finished, Winner} >> Leader
+%%      The winner notifies the leader that the autoheal process was
+%%      either finished or aborted (i.e. autoheal_safe_to_start was sent
+%%      to losers).
 
 %%----------------------------------------------------------------------------
 
-init() -> not_healing.
+init() ->
+    %% We check the application environment for a saved autoheal state
+    %% saved during a restart. If this node is a leader, it is used
+    %% to determine if it needs to ask the winner to report about the
+    %% autoheal progress.
+    State = case application:get_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART) of
+        {ok, S}   -> S;
+        undefined -> not_healing
+    end,
+    ok = application:unset_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART),
+    case State of
+        {leader_waiting, Winner, _} ->
+            rabbit_log:info(
+              "Autoheal: in progress, requesting report from ~p~n", [Winner]),
+            send(Winner, report_autoheal_status);
+        _ ->
+            ok
+    end,
+    State.
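The save/restore trick used by init/0 here and restart_loser/2 below (stash the in-flight autoheal state in the rabbit application environment just before restarting, read it back and clear it on the next init) is a generally useful pattern for carrying a small piece of state across an application restart in the same VM. A minimal standalone sketch, with a made-up application name and key:

-define(STASH_KEY, state_saved_across_restart).    %% hypothetical key

stash(State) ->
    ok = application:set_env(my_app, ?STASH_KEY, State).

recover() ->
    State = case application:get_env(my_app, ?STASH_KEY) of
                {ok, S}   -> S;
                undefined -> initial_state
            end,
    ok = application:unset_env(my_app, ?STASH_KEY),
    State.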
 
 maybe_start(not_healing) ->
     case enabled() of
-        true  -> [Leader | _] = lists:usort(rabbit_mnesia:cluster_nodes(all)),
+        true  -> Leader = leader(),
                  send(Leader, {request_start, node()}),
                  rabbit_log:info("Autoheal request sent to ~p~n", [Leader]),
                  not_healing;
@@ -76,38 +146,39 @@ maybe_start(State) ->
     State.
 
 enabled() ->
-    {ok, autoheal} =:= application:get_env(rabbit, cluster_partition_handling).
+    case application:get_env(rabbit, cluster_partition_handling) of
+        {ok, autoheal}                         -> true;
+        {ok, {pause_if_all_down, _, autoheal}} -> true;
+        _                                      -> false
+    end.
 
+leader() ->
+    [Leader | _] = lists:usort(rabbit_mnesia:cluster_nodes(all)),
+    Leader.
 
 %% This is the winner receiving its last notification that a node has
 %% stopped - all nodes can now start again
 rabbit_down(Node, {winner_waiting, [Node], Notify}) ->
     rabbit_log:info("Autoheal: final node has stopped, starting...~n",[]),
-    notify_safe(Notify),
-    not_healing;
+    winner_finish(Notify);
 
 rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
     {winner_waiting, WaitFor -- [Node], Notify};
 
-rabbit_down(Node, {leader_waiting, [Node]}) ->
-    not_healing;
-
-rabbit_down(Node, {leader_waiting, WaitFor}) ->
-    {leader_waiting, WaitFor -- [Node]};
+rabbit_down(Winner, {leader_waiting, Winner, Losers}) ->
+    abort([Winner], Losers);
 
 rabbit_down(_Node, State) ->
-    %% ignore, we already cancelled the autoheal process
+    %% Ignore. Either:
+    %%     o  we already cancelled the autoheal process;
+    %%     o  we are still waiting for the winner's report.
     State.
 
 node_down(_Node, not_healing) ->
     not_healing;
 
 node_down(Node, {winner_waiting, _, Notify}) ->
-    rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]),
-    %% Make sure any nodes waiting for us start - it won't necessarily
-    %% heal the partition but at least they won't get stuck.
-    notify_safe(Notify),
-    not_healing;
+    abort([Node], Notify);
 
 node_down(Node, _State) ->
     rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]),
@@ -118,78 +189,158 @@ node_down(Node, _State) ->
 handle_msg({request_start, Node},
            not_healing, Partitions) ->
     rabbit_log:info("Autoheal request received from ~p~n", [Node]),
-    rabbit_node_monitor:ping_all(),
-    case rabbit_node_monitor:all_rabbit_nodes_up() of
-        false -> not_healing;
-        true  -> AllPartitions = all_partitions(Partitions),
-                 {Winner, Losers} = make_decision(AllPartitions),
-                 rabbit_log:info("Autoheal decision~n"
-                                 "  * Partitions: ~p~n"
-                                 "  * Winner:     ~p~n"
-                                 "  * Losers:     ~p~n",
-                                 [AllPartitions, Winner, Losers]),
-                 [send(L, {winner_is, Winner}) || L <- Losers],
-                 Continue = fun(Msg) ->
-                                    handle_msg(Msg, not_healing, Partitions)
-                            end,
-                 case node() =:= Winner of
-                     true  -> Continue({become_winner, Losers});
-                     false -> send(Winner, {become_winner, Losers}), %% [0]
-                              case lists:member(node(), Losers) of
-                                  true  -> Continue({winner_is, Winner});
-                                  false -> {leader_waiting, Losers}
-                              end
-                 end
+    case check_other_nodes(Partitions) of
+        {error, E} ->
+            rabbit_log:info("Autoheal request denied: ~s~n", [fmt_error(E)]),
+            not_healing;
+        {ok, AllPartitions} ->
+            {Winner, Losers} = make_decision(AllPartitions),
+            rabbit_log:info("Autoheal decision~n"
+                            "  * Partitions: ~p~n"
+                            "  * Winner:     ~p~n"
+                            "  * Losers:     ~p~n",
+                            [AllPartitions, Winner, Losers]),
+            case node() =:= Winner of
+                true  -> handle_msg({become_winner, Losers},
+                                    not_healing, Partitions);
+                false -> send(Winner, {become_winner, Losers}),
+                         {leader_waiting, Winner, Losers}
+            end
     end;
-%% [0] If we are a loser we will never receive this message - but it
-%% won't stick in the mailbox as we are restarting anyway
 
 handle_msg({request_start, Node},
            State, _Partitions) ->
-    rabbit_log:info("Autoheal request received from ~p when in state ~p; "
-                    "ignoring~n", [Node, State]),
+    rabbit_log:info("Autoheal request received from ~p when healing; "
+                    "ignoring~n", [Node]),
     State;
 
 handle_msg({become_winner, Losers},
            not_healing, _Partitions) ->
     rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop~n",
                     [Losers]),
-    {winner_waiting, Losers, Losers};
+    %% The leader said everything was ready - do we agree? If not then
+    %% give up.
+    Down = Losers -- rabbit_node_monitor:alive_rabbit_nodes(Losers),
+    case Down of
+        [] -> [send(L, {winner_is, node()}) || L <- Losers],
+              {winner_waiting, Losers, Losers};
+        _  -> abort(Down, Losers)
+    end;
 
-handle_msg({become_winner, Losers},
-           {winner_waiting, WaitFor, Notify}, _Partitions) ->
-    rabbit_log:info("Autoheal: I am the winner, waiting additionally for "
-                    "~p to stop~n", [Losers]),
-    {winner_waiting, lists:usort(Losers ++ WaitFor),
-     lists:usort(Losers ++ Notify)};
+handle_msg({winner_is, Winner}, State = not_healing,
+           _Partitions) ->
+    %% This node is a loser, nothing else.
+    restart_loser(State, Winner),
+    restarting;
+handle_msg({winner_is, Winner}, State = {leader_waiting, Winner, _},
+           _Partitions) ->
+    %% This node is the leader and a loser at the same time.
+    restart_loser(State, Winner),
+    restarting;
 
-handle_msg({winner_is, Winner},
-           not_healing, _Partitions) ->
+handle_msg(_, restarting, _Partitions) ->
+    %% ignore, we can contribute no further
+    restarting;
+
+handle_msg(report_autoheal_status, not_healing, _Partitions) ->
+    %% The leader is asking us (the winner) about the autoheal status.
+    %% This happens when the leader is a loser and has just restarted.
+    %% We are in the "not_healing" state, so the previous autoheal
+    %% process has ended: let the leader know.
+    send(leader(), {autoheal_finished, node()}),
+    not_healing;
+
+handle_msg(report_autoheal_status, State, _Partitions) ->
+    %% As above, the leader is asking about the autoheal status, but we
+    %% are not finished with it. There is no need to send anything yet
+    %% to the leader: we will send the notification when it is over.
+    State;
+
+handle_msg({autoheal_finished, Winner},
+           {leader_waiting, Winner, _}, _Partitions) ->
+    %% The winner is finished with the autoheal process and notified us
+    %% (the leader). We can transition to the "not_healing" state and
+    %% accept new requests.
+    rabbit_log:info("Autoheal finished according to winner ~p~n", [Winner]),
+    not_healing;
+
+handle_msg({autoheal_finished, Winner}, not_healing, _Partitions)
+           when Winner =:= node() ->
+    %% We are the leader and the winner. The state already transitioned
+    %% to "not_healing" at the end of the autoheal process.
+    rabbit_log:info("Autoheal finished according to winner ~p~n", [node()]),
+    not_healing.
+
+%%----------------------------------------------------------------------------
+
+send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
+
+abort(Down, Notify) ->
+    rabbit_log:info("Autoheal: aborting - ~p down~n", [Down]),
+    %% Make sure any nodes waiting for us start - it won't necessarily
+    %% heal the partition but at least they won't get stuck.
+    winner_finish(Notify).
+
+winner_finish(Notify) ->
+    %% There is a race in Mnesia causing a starting loser to hang
+    %% forever if another loser stops at the same time: the starting
+    %% node connects to the other node, negotiates the protocol and
+    %% attempts to acquire a write lock on the schema on the other node.
+    %% If the other node stops between the protocol negotiation and lock
+    %% request, the starting node never gets an answer to its lock
+    %% request.
+    %%
+    %% To work around the problem, we make sure Mnesia is stopped on all
+    %% losing nodes before sending the "autoheal_safe_to_start" signal.
+    wait_for_mnesia_shutdown(Notify),
+    [{rabbit_outside_app_process, N} ! autoheal_safe_to_start || N <- Notify],
+    send(leader(), {autoheal_finished, node()}),
+    not_healing.
+
+wait_for_mnesia_shutdown([Node | Rest] = AllNodes) ->
+    case rpc:call(Node, mnesia, system_info, [is_running]) of
+        no ->
+            wait_for_mnesia_shutdown(Rest);
+        Running when
+        Running =:= yes orelse
+        Running =:= starting orelse
+        Running =:= stopping ->
+            timer:sleep(?MNESIA_STOPPED_PING_INTERNAL),
+            wait_for_mnesia_shutdown(AllNodes);
+        _ ->
+            wait_for_mnesia_shutdown(Rest)
+    end;
+wait_for_mnesia_shutdown([]) ->
+    ok.
+
+restart_loser(State, Winner) ->
     rabbit_log:warning(
       "Autoheal: we were selected to restart; winner is ~p~n", [Winner]),
     rabbit_node_monitor:run_outside_applications(
       fun () ->
               MRef = erlang:monitor(process, {?SERVER, Winner}),
               rabbit:stop(),
-              receive
-                  {'DOWN', MRef, process, {?SERVER, Winner}, _Reason} -> ok;
-                  autoheal_safe_to_start                              -> ok
+              NextState = receive
+                  {'DOWN', MRef, process, {?SERVER, Winner}, _Reason} ->
+                      not_healing;
+                  autoheal_safe_to_start ->
+                      State
               end,
               erlang:demonitor(MRef, [flush]),
+              %% During the restart, the autoheal state is lost so we
+              %% store it in the application environment temporarily so
+              %% init/0 can pick it up.
+              %%
+              %% This is useful when the leader is also a loser: because
+              %% the leader is restarting, there is a good chance it
+              %% misses the "autoheal finished!"
+              %% notification from the winner. Thanks to the saved
+              %% state, it knows it needs to ask the winner if the
+              %% autoheal process is finished or not.
+              application:set_env(rabbit,
+                ?AUTOHEAL_STATE_AFTER_RESTART, NextState),
               rabbit:start()
-      end),
-    restarting;
-
-handle_msg(_, restarting, _Partitions) ->
-    %% ignore, we can contribute no further
-    restarting.
-
-%%----------------------------------------------------------------------------
-
-send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
-
-notify_safe(Notify) ->
-    [{rabbit_outside_app_process, N} ! autoheal_safe_to_start || N <- Notify].
+      end, true).
 
 make_decision(AllPartitions) ->
     Sorted = lists:sort([{partition_value(P), P} || P <- AllPartitions]),
@@ -206,11 +357,21 @@ partition_value(Partition) ->
 %% We have our local understanding of what partitions exist; but we
 %% only know which nodes we have been partitioned from, not which
 %% nodes are partitioned from each other.
-all_partitions(PartitionedWith) ->
+check_other_nodes(LocalPartitions) ->
     Nodes = rabbit_mnesia:cluster_nodes(all),
-    Partitions = [{node(), PartitionedWith} |
-                  rabbit_node_monitor:partitions(Nodes -- [node()])],
-    all_partitions(Partitions, [Nodes]).
+    {Results, Bad} = rabbit_node_monitor:status(Nodes -- [node()]),
+    RemotePartitions = [{Node, proplists:get_value(partitions, Res)}
+                        || {Node, Res} <- Results],
+    RemoteDown = [{Node, Down}
+                  || {Node, Res} <- Results,
+                     Down <- [Nodes -- proplists:get_value(nodes, Res)],
+                     Down =/= []],
+    case {Bad, RemoteDown} of
+        {[], []} -> Partitions = [{node(), LocalPartitions} | RemotePartitions],
+                    {ok, all_partitions(Partitions, [Nodes])};
+        {[], _}  -> {error, {remote_down, RemoteDown}};
+        {_,  _}  -> {error, {nodes_down, Bad}}
+    end.
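One small idiom in check_other_nodes/1 above is worth calling out: generating over a single-element list (Down <- [Nodes -- ...]) is the usual Erlang way to bind an intermediate value inside a list comprehension so that a later filter (Down =/= []) can refer to it. A standalone illustration with invented names:

%% Pair each node with the members of Expected it cannot see, keeping
%% only nodes that are missing at least one.
missing(Reports, Expected) ->
    [{Node, Missing}
     || {Node, Seen} <- Reports,
        Missing <- [Expected -- Seen],   %% bind Missing within the comprehension
        Missing =/= []].

%% missing([{n1, [n1,n2]}, {n2, [n1,n2,n3]}], [n1,n2,n3]) =:= [{n1, [n3]}]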
 
 all_partitions([], Partitions) ->
     Partitions;
@@ -225,3 +386,8 @@ all_partitions([{Node, CantSee} | Rest], Partitions) ->
                       _        -> [A, B | Others]
                   end,
     all_partitions(Rest, Partitions1).
+
+fmt_error({remote_down, RemoteDown}) ->
+    rabbit_misc:format("Remote nodes disconnected:~n ~p", [RemoteDown]);
+fmt_error({nodes_down, NodesDown}) ->
+    rabbit_misc:format("Local nodes down: ~p", [NodesDown]).
index 8f37bf60da10d9c16d898455b89bac225c8b706d..d6cd3ca43d64d05a6eea205777d55bcf8c0783cd 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_backing_queue).
 
+-export([info_keys/0]).
+
+-define(INFO_KEYS, [messages_ram, messages_ready_ram,
+                    messages_unacknowledged_ram, messages_persistent,
+                    message_bytes, message_bytes_ready,
+                    message_bytes_unacknowledged, message_bytes_ram,
+                    message_bytes_persistent,
+                    disk_reads, disk_writes, backing_queue_status]).
+
 -ifdef(use_specs).
 
 %% We can't specify a per-queue ack/state with callback signatures
 -type(ack()   :: any()).
 -type(state() :: any()).
 
+-type(flow() :: 'flow' | 'noflow').
 -type(msg_ids() :: [rabbit_types:msg_id()]).
 -type(fetch_result(Ack) ::
         ('empty' | {rabbit_types:basic_message(), boolean(), Ack})).
@@ -37,6 +47,8 @@
 -type(msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A)).
 -type(msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean())).
 
+-spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+
 %% Called on startup with a list of durable queue names. The queues
 %% aren't being started at this point, but this call allows the
 %% backing queue to perform any checking necessary for the consistency
 %% content.
 -callback delete_and_terminate(any(), state()) -> state().
 
+%% Called to clean up after a crashed queue. In this case we don't
+%% have a process and thus a state(); we are just removing on-disk data.
+-callback delete_crashed(rabbit_types:amqqueue()) -> 'ok'.
+
 %% Remove all 'fetchable' messages from the queue, i.e. all messages
 %% except those that have been fetched already and are pending acks.
 -callback purge(state()) -> {purged_msg_count(), state()}.
 
 %% Publish a message.
 -callback publish(rabbit_types:basic_message(),
-                  rabbit_types:message_properties(), boolean(), pid(),
+                  rabbit_types:message_properties(), boolean(), pid(), flow(),
                   state()) -> state().
 
 %% Called for messages which have already been passed straight
 %% out to a client. The queue will be empty for these calls
 %% (i.e. saves the round trip through the backing queue).
 -callback publish_delivered(rabbit_types:basic_message(),
-                            rabbit_types:message_properties(), pid(), state())
+                            rabbit_types:message_properties(), pid(), flow(),
+                            state())
                            -> {ack(), state()}.
 
 %% Called to inform the BQ about messages which have reached the
 %% queue, but are not going to be further passed to BQ.
--callback discard(rabbit_types:msg_id(), pid(), state()) -> state().
+-callback discard(rabbit_types:msg_id(), pid(), flow(), state()) -> state().
 
 %% Return ids of messages which have been confirmed since the last
 %% invocation of this function (or initialisation).
 %% inbound messages and outbound messages at the moment.
 -callback msg_rates(state()) -> {float(), float()}.
 
-%% Exists for debugging purposes, to be able to expose state via
-%% rabbitmqctl list_queues backing_queue_status
--callback status(state()) -> [{atom(), any()}].
+-callback info(atom(), state()) -> any().
 
 %% Passed a function to be invoked with the relevant backing queue's
 %% state. Useful for when the backing queue or other components need
 
 behaviour_info(callbacks) ->
     [{start, 1}, {stop, 0}, {init, 3}, {terminate, 2},
-     {delete_and_terminate, 2}, {purge, 1}, {purge_acks, 1}, {publish, 5},
-     {publish_delivered, 4}, {discard, 3}, {drain_confirmed, 1},
-     {dropwhile, 2}, {fetchwhile, 4},
-     {fetch, 2}, {ack, 2}, {requeue, 2}, {ackfold, 4}, {fold, 3}, {len, 1},
+     {delete_and_terminate, 2}, {delete_crashed, 1}, {purge, 1},
+     {purge_acks, 1}, {publish, 6},
+     {publish_delivered, 5}, {discard, 4}, {drain_confirmed, 1},
+     {dropwhile, 2}, {fetchwhile, 4}, {fetch, 2},
+     {drop, 2}, {ack, 2}, {requeue, 2}, {ackfold, 4}, {fold, 3}, {len, 1},
      {is_empty, 1}, {depth, 1}, {set_ram_duration_target, 2},
      {ram_duration, 1}, {needs_timeout, 1}, {timeout, 1},
-     {handle_pre_hibernate, 1}, {resume, 1}, {msg_rates, 1}, {status, 1},
-     {invoke, 3}, {is_duplicate, 2}] ;
+     {handle_pre_hibernate, 1}, {resume, 1}, {msg_rates, 1},
+     {info, 2}, {invoke, 3}, {is_duplicate, 2}] ;
 behaviour_info(_Other) ->
     undefined.
 
 -endif.
+
+info_keys() -> ?INFO_KEYS.
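To see how the new per-key info/2 callback subsumes the old status/1, here is a hedged sketch of how a caller could rebuild the kind of proplist that rabbitmqctl list_queues backing_queue_status used to rely on, where BQ is any backing queue module and BQS its state:

backing_queue_infos(BQ, BQS) ->
    [{Key, BQ:info(Key, BQS)} || Key <- rabbit_backing_queue:info_keys()].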
diff --git a/rabbitmq-server/src/rabbit_backing_queue_qc.erl b/rabbitmq-server/src/rabbit_backing_queue_qc.erl
deleted file mode 100644 (file)
index 49b7112..0000000
+++ /dev/null
@@ -1,472 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_backing_queue_qc).
--ifdef(use_proper_qc).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
--include_lib("proper/include/proper.hrl").
-
--behaviour(proper_statem).
-
--define(BQMOD, rabbit_variable_queue).
--define(QUEUE_MAXLEN, 10000).
--define(TIMEOUT_LIMIT, 100).
-
--define(RECORD_INDEX(Key, Record),
-        proplists:get_value(
-          Key, lists:zip(record_info(fields, Record),
-                         lists:seq(2, record_info(size, Record))))).
-
--export([initial_state/0, command/1, precondition/2, postcondition/3,
-         next_state/3]).
-
--export([prop_backing_queue_test/0, publish_multiple/1,
-         timeout/2, bump_credit/1]).
-
--record(state, {bqstate,
-                len,         %% int
-                next_seq_id, %% int
-                messages,    %% gb_trees of seqid => {msg_props, basic_msg}
-                acks,        %% [{acktag, {seqid, {msg_props, basic_msg}}}]
-                confirms,    %% set of msgid
-                publishing}).%% int
-
-%% Initialise model
-
-initial_state() ->
-    #state{bqstate     = qc_variable_queue_init(qc_test_queue()),
-           len         = 0,
-           next_seq_id = 0,
-           messages    = gb_trees:empty(),
-           acks        = [],
-           confirms    = gb_sets:new(),
-           publishing  = 0}.
-
-%% Property
-
-prop_backing_queue_test() ->
-    ?FORALL(Cmds, commands(?MODULE, initial_state()),
-            backing_queue_test(Cmds)).
-
-backing_queue_test(Cmds) ->
-    {ok, FileSizeLimit} =
-        application:get_env(rabbit, msg_store_file_size_limit),
-    application:set_env(rabbit, msg_store_file_size_limit, 512,
-                        infinity),
-    {ok, MaxJournal} =
-        application:get_env(rabbit, queue_index_max_journal_entries),
-    application:set_env(rabbit, queue_index_max_journal_entries, 128,
-                        infinity),
-
-    {_H, #state{bqstate = BQ}, Res} = run_commands(?MODULE, Cmds),
-
-    application:set_env(rabbit, msg_store_file_size_limit,
-                        FileSizeLimit, infinity),
-    application:set_env(rabbit, queue_index_max_journal_entries,
-                        MaxJournal, infinity),
-
-    ?BQMOD:delete_and_terminate(shutdown, BQ),
-    ?WHENFAIL(
-       io:format("Result: ~p~n", [Res]),
-       aggregate(command_names(Cmds), Res =:= ok)).
-
-%% Commands
-
-%% Command frequencies are tuned so that queues are normally
-%% reasonably short, but they may sometimes exceed
-%% ?QUEUE_MAXLEN. Publish-multiple and purging cause extreme queue
-%% lengths, so these have lower probabilities.  Fetches/drops are
-%% sufficiently frequent so that commands that need acktags get decent
-%% coverage.
-
-command(S) ->
-    frequency([{10, qc_publish(S)},
-               {1,  qc_publish_delivered(S)},
-               {1,  qc_publish_multiple(S)},  %% very slow
-               {9,  qc_fetch(S)},             %% needed for ack and requeue
-               {6,  qc_drop(S)},              %%
-               {15, qc_ack(S)},
-               {15, qc_requeue(S)},
-               {3,  qc_set_ram_duration_target(S)},
-               {1,  qc_ram_duration(S)},
-               {1,  qc_drain_confirmed(S)},
-               {1,  qc_dropwhile(S)},
-               {1,  qc_is_empty(S)},
-               {1,  qc_timeout(S)},
-               {1,  qc_bump_credit(S)},
-               {1,  qc_purge(S)},
-               {1,  qc_fold(S)}]).
-
-qc_publish(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, publish,
-     [qc_message(),
-      #message_properties{needs_confirming = frequency([{1,  true},
-                                                        {20, false}]),
-                          expiry = oneof([undefined | lists:seq(1, 10)])},
-      false, self(), BQ]}.
-
-qc_publish_multiple(#state{}) ->
-    {call, ?MODULE, publish_multiple, [resize(?QUEUE_MAXLEN, pos_integer())]}.
-
-qc_publish_delivered(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, publish_delivered,
-     [qc_message(), #message_properties{}, self(), BQ]}.
-
-qc_fetch(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, fetch, [boolean(), BQ]}.
-
-qc_drop(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, drop, [boolean(), BQ]}.
-
-qc_ack(#state{bqstate = BQ, acks = Acks}) ->
-    {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), BQ]}.
-
-qc_requeue(#state{bqstate = BQ, acks = Acks}) ->
-    {call, ?BQMOD, requeue, [rand_choice(proplists:get_keys(Acks)), BQ]}.
-
-qc_set_ram_duration_target(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, set_ram_duration_target,
-     [oneof([0, 1, 2, resize(1000, pos_integer()), infinity]), BQ]}.
-
-qc_ram_duration(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, ram_duration, [BQ]}.
-
-qc_drain_confirmed(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, drain_confirmed, [BQ]}.
-
-qc_dropwhile(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, dropwhile, [fun dropfun/1, BQ]}.
-
-qc_is_empty(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, is_empty, [BQ]}.
-
-qc_timeout(#state{bqstate = BQ}) ->
-    {call, ?MODULE, timeout, [BQ, ?TIMEOUT_LIMIT]}.
-
-qc_bump_credit(#state{bqstate = BQ}) ->
-    {call, ?MODULE, bump_credit, [BQ]}.
-
-qc_purge(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, purge, [BQ]}.
-
-qc_fold(#state{bqstate = BQ}) ->
-    {call, ?BQMOD, fold, [makefoldfun(pos_integer()), foldacc(), BQ]}.
-
-%% Preconditions
-
-%% Create long queues by only allowing publishing
-precondition(#state{publishing = Count}, {call, _Mod, Fun, _Arg})
-  when Count > 0, Fun /= publish ->
-    false;
-precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg})
-  when Fun =:= ack; Fun =:= requeue ->
-    length(Acks) > 0;
-precondition(#state{messages = Messages},
-    {call, ?BQMOD, publish_delivered, _Arg}) ->
-    gb_trees:is_empty(Messages);
-precondition(_S, {call, ?BQMOD, _Fun, _Arg}) ->
-    true;
-precondition(_S, {call, ?MODULE, timeout, _Arg}) ->
-    true;
-precondition(_S, {call, ?MODULE, bump_credit, _Arg}) ->
-    true;
-precondition(#state{len = Len}, {call, ?MODULE, publish_multiple, _Arg}) ->
-    Len < ?QUEUE_MAXLEN.
-
-%% Model updates
-
-next_state(S, BQ, {call, ?BQMOD, publish, [Msg, MsgProps, _Del, _Pid, _BQ]}) ->
-    #state{len         = Len,
-           messages    = Messages,
-           confirms    = Confirms,
-           publishing  = PublishCount,
-           next_seq_id = NextSeq} = S,
-    MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
-    NeedsConfirm =
-        {call, erlang, element,
-         [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
-    S#state{bqstate  = BQ,
-            len      = Len + 1,
-            next_seq_id = NextSeq + 1,
-            messages = gb_trees:insert(NextSeq, {MsgProps, Msg}, Messages),
-            publishing = {call, erlang, max, [0, {call, erlang, '-',
-                                                  [PublishCount, 1]}]},
-            confirms = case eval(NeedsConfirm) of
-                           true -> gb_sets:add(MsgId, Confirms);
-                           _    -> Confirms
-                       end};
-
-next_state(S, _BQ, {call, ?MODULE, publish_multiple, [PublishCount]}) ->
-    S#state{publishing = PublishCount};
-
-next_state(S, Res,
-           {call, ?BQMOD, publish_delivered,
-            [Msg, MsgProps, _Pid, _BQ]}) ->
-    #state{confirms = Confirms, acks = Acks, next_seq_id = NextSeq} = S,
-    AckTag = {call, erlang, element, [1, Res]},
-    BQ1    = {call, erlang, element, [2, Res]},
-    MsgId  = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
-    NeedsConfirm =
-        {call, erlang, element,
-         [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
-    S#state{bqstate  = BQ1,
-            next_seq_id = NextSeq + 1,
-            confirms = case eval(NeedsConfirm) of
-                           true -> gb_sets:add(MsgId, Confirms);
-                           _    -> Confirms
-                       end,
-            acks = [{AckTag, {NextSeq, {MsgProps, Msg}}}|Acks]
-           };
-
-next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) ->
-    next_state_fetch_and_drop(S, Res, AckReq, 3);
-
-next_state(S, Res, {call, ?BQMOD, drop, [AckReq, _BQ]}) ->
-    next_state_fetch_and_drop(S, Res, AckReq, 2);
-
-next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) ->
-    #state{acks = AcksState} = S,
-    BQ1 = {call, erlang, element, [2, Res]},
-    S#state{bqstate = BQ1,
-            acks    = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};
-
-next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _V]}) ->
-    #state{messages = Messages, acks = AcksState} = S,
-    BQ1 = {call, erlang, element, [2, Res]},
-    Messages1 = lists:foldl(fun (AckTag, Msgs) ->
-                                {SeqId, MsgPropsMsg} =
-                                   proplists:get_value(AckTag, AcksState),
-                                gb_trees:insert(SeqId, MsgPropsMsg, Msgs)
-                            end, Messages, AcksArg),
-    S#state{bqstate  = BQ1,
-            len      = gb_trees:size(Messages1),
-            messages = Messages1,
-            acks     = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};
-
-next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) ->
-    S#state{bqstate = BQ};
-
-next_state(S, Res, {call, ?BQMOD, ram_duration, _Args}) ->
-    BQ1 = {call, erlang, element, [2, Res]},
-    S#state{bqstate = BQ1};
-
-next_state(S, Res, {call, ?BQMOD, drain_confirmed, _Args}) ->
-    BQ1 = {call, erlang, element, [2, Res]},
-    S#state{bqstate = BQ1};
-
-next_state(S, Res, {call, ?BQMOD, dropwhile, _Args}) ->
-    BQ = {call, erlang, element, [2, Res]},
-    #state{messages = Messages} = S,
-    Msgs1 = drop_messages(Messages),
-    S#state{bqstate = BQ, len = gb_trees:size(Msgs1), messages = Msgs1};
-
-next_state(S, _Res, {call, ?BQMOD, is_empty, _Args}) ->
-    S;
-
-next_state(S, BQ, {call, ?MODULE, timeout, _Args}) ->
-    S#state{bqstate = BQ};
-next_state(S, BQ, {call, ?MODULE, bump_credit, _Args}) ->
-    S#state{bqstate = BQ};
-
-next_state(S, Res, {call, ?BQMOD, purge, _Args}) ->
-    BQ1 = {call, erlang, element, [2, Res]},
-    S#state{bqstate = BQ1, len = 0, messages = gb_trees:empty()};
-
-next_state(S, Res, {call, ?BQMOD, fold, _Args}) ->
-    BQ1 = {call, erlang, element, [2, Res]},
-    S#state{bqstate = BQ1}.
-
-%% Postconditions
-
-postcondition(S, {call, ?BQMOD, fetch, _Args}, Res) ->
-    #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
-    case Res of
-        {{MsgFetched, _IsDelivered, AckTag}, _BQ} ->
-            {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
-            MsgFetched =:= Msg andalso
-            not proplists:is_defined(AckTag, Acks) andalso
-                not gb_sets:is_element(AckTag, Confrms) andalso
-                Len =/= 0;
-        {empty, _BQ} ->
-            Len =:= 0
-    end;
-
-postcondition(S, {call, ?BQMOD, drop, _Args}, Res) ->
-    #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
-    case Res of
-        {{MsgIdFetched, AckTag}, _BQ} ->
-            {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
-            MsgId = eval({call, erlang, element,
-                          [?RECORD_INDEX(id, basic_message), Msg]}),
-            MsgIdFetched =:= MsgId andalso
-            not proplists:is_defined(AckTag, Acks) andalso
-                not gb_sets:is_element(AckTag, Confrms) andalso
-                Len =/= 0;
-        {empty, _BQ} ->
-            Len =:= 0
-    end;
-
-postcondition(S, {call, ?BQMOD, publish_delivered, _Args}, {AckTag, _BQ}) ->
-    #state{acks = Acks, confirms = Confrms} = S,
-    not proplists:is_defined(AckTag, Acks) andalso
-        not gb_sets:is_element(AckTag, Confrms);
-
-postcondition(#state{len = Len}, {call, ?BQMOD, purge, _Args}, Res) ->
-    {PurgeCount, _BQ} = Res,
-    Len =:= PurgeCount;
-
-postcondition(#state{len = Len}, {call, ?BQMOD, is_empty, _Args}, Res) ->
-    (Len =:= 0) =:= Res;
-
-postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) ->
-    #state{confirms = Confirms} = S,
-    {ReportedConfirmed, _BQ} = Res,
-    lists:all(fun (M) -> gb_sets:is_element(M, Confirms) end,
-              ReportedConfirmed);
-
-postcondition(S, {call, ?BQMOD, fold, [FoldFun, Acc0, _BQ0]}, {Res, _BQ1}) ->
-    #state{messages = Messages} = S,
-    {_, Model} = lists:foldl(fun ({_SeqId, {_MsgProps, _Msg}}, {stop, Acc}) ->
-                                     {stop, Acc};
-                                 ({_SeqId, {MsgProps, Msg}}, {cont, Acc}) ->
-                                     FoldFun(Msg, MsgProps, false, Acc)
-                             end, {cont, Acc0}, gb_trees:to_list(Messages)),
-    true = Model =:= Res;
-
-postcondition(#state{bqstate = BQ, len = Len}, {call, _M, _F, _A}, _Res) ->
-    ?BQMOD:len(BQ) =:= Len.
-
-%% Helpers
-
-publish_multiple(_C) ->
-    ok.
-
-timeout(BQ, 0) ->
-    BQ;
-timeout(BQ, AtMost) ->
-    case ?BQMOD:needs_timeout(BQ) of
-        false -> BQ;
-        _     -> timeout(?BQMOD:timeout(BQ), AtMost - 1)
-    end.
-
-bump_credit(BQ) ->
-    case credit_flow:blocked() of
-        false -> BQ;
-        true  -> receive
-                     {bump_credit, Msg} ->
-                         credit_flow:handle_bump_msg(Msg),
-                         ?BQMOD:resume(BQ)
-                 end
-    end.
-
-qc_message_payload() -> ?SIZED(Size, resize(Size * Size, binary())).
-
-qc_routing_key() -> noshrink(binary(10)).
-
-qc_delivery_mode() -> oneof([1, 2]).
-
-qc_message() -> qc_message(qc_delivery_mode()).
-
-qc_message(DeliveryMode) ->
-    {call, rabbit_basic, message, [qc_default_exchange(),
-                                   qc_routing_key(),
-                                   #'P_basic'{delivery_mode = DeliveryMode},
-                                   qc_message_payload()]}.
-
-qc_default_exchange() ->
-    {call, rabbit_misc, r, [<<>>, exchange, <<>>]}.
-
-qc_variable_queue_init(Q) ->
-    {call, ?BQMOD, init,
-     [Q, new, function(2, {ok, []})]}.
-
-qc_test_q() -> {call, rabbit_misc, r, [<<"/">>, queue, noshrink(binary(16))]}.
-
-qc_test_queue() -> qc_test_queue(boolean()).
-
-qc_test_queue(Durable) ->
-    #amqqueue{name        = qc_test_q(),
-              durable     = Durable,
-              auto_delete = false,
-              arguments   = [],
-              pid         = self()}.
-
-rand_choice([])   -> [];
-rand_choice(List) -> rand_choice(List, [], random:uniform(length(List))).
-
-rand_choice(_List, Selection, 0) ->
-    Selection;
-rand_choice(List, Selection, N)  ->
-    Picked = lists:nth(random:uniform(length(List)), List),
-                       rand_choice(List -- [Picked], [Picked | Selection],
-                       N - 1).
-
-makefoldfun(Size) ->
-    fun (Msg, _MsgProps, Unacked, Acc) ->
-            case {length(Acc) > Size, Unacked} of
-                {false, false} -> {cont, [Msg | Acc]};
-                {false, true}  -> {cont, Acc};
-                {true, _}      -> {stop, Acc}
-            end
-    end.
-foldacc() -> [].
-
-dropfun(Props) ->
-    Expiry = eval({call, erlang, element,
-                   [?RECORD_INDEX(expiry, message_properties), Props]}),
-    Expiry =/= 1.
-
-drop_messages(Messages) ->
-    case gb_trees:is_empty(Messages) of
-        true ->
-            Messages;
-        false -> {_Seq, MsgProps_Msg, M2} = gb_trees:take_smallest(Messages),
-            MsgProps = {call, erlang, element, [1, MsgProps_Msg]},
-            case dropfun(MsgProps) of
-                true  -> drop_messages(M2);
-                false -> Messages
-            end
-    end.
-
-next_state_fetch_and_drop(S, Res, AckReq, AckTagIdx) ->
-    #state{len = Len, messages = Messages, acks = Acks} = S,
-    ResultInfo = {call, erlang, element, [1, Res]},
-    BQ1        = {call, erlang, element, [2, Res]},
-    AckTag     = {call, erlang, element, [AckTagIdx, ResultInfo]},
-    S1         = S#state{bqstate = BQ1},
-    case gb_trees:is_empty(Messages) of
-        true  -> S1;
-        false -> {SeqId, MsgProp_Msg, M2} = gb_trees:take_smallest(Messages),
-                 S2 = S1#state{len = Len - 1, messages = M2},
-                 case AckReq of
-                     true  ->
-                         S2#state{acks = [{AckTag, {SeqId, MsgProp_Msg}}|Acks]};
-                     false ->
-                         S2
-                 end
-    end.
-
--else.
-
--export([prop_disabled/0]).
-
-prop_disabled() ->
-    exit({compiled_without_proper,
-          "PropEr was not present during compilation of the test module. "
-          "Hence all tests are disabled."}).
-
--endif.
index 85f9d56e8720336ee112e05bc473616b176f3806..efc5ce27452027c44c033a75831c1de030a12853 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_basic).
@@ -21,8 +21,8 @@
 -export([publish/4, publish/5, publish/1,
          message/3, message/4, properties/1, prepend_table_header/3,
          extract_headers/1, map_headers/2, delivery/4, header_routes/1,
-         parse_expiration/1]).
--export([build_content/2, from_content/1, msg_size/1]).
+         parse_expiration/1, header/2, header/3]).
+-export([build_content/2, from_content/1, msg_size/1, maybe_gc_large_msg/1]).
 
 %%----------------------------------------------------------------------------
 
@@ -32,6 +32,7 @@
         (rabbit_framing:amqp_property_record() | [{atom(), any()}])).
 -type(publish_result() ::
         ({ok, [pid()]} | rabbit_types:error('not_found'))).
+-type(header() :: any()).
 -type(headers() :: rabbit_framing:amqp_table() | 'undefined').
 
 -type(exchange_input() :: (rabbit_types:exchange() | rabbit_exchange:name())).
 -spec(prepend_table_header/3 ::
         (binary(), rabbit_framing:amqp_table(), headers()) -> headers()).
 
+-spec(header/2 ::
+        (header(), headers()) -> 'undefined' | any()).
+-spec(header/3 ::
+        (header(), headers(), any()) -> 'undefined' | any()).
+
 -spec(extract_headers/1 :: (rabbit_types:content()) -> headers()).
 
 -spec(map_headers/2 :: (fun((headers()) -> headers()), rabbit_types:content())
@@ -79,6 +85,9 @@
 -spec(msg_size/1 :: (rabbit_types:content() | rabbit_types:message()) ->
                          non_neg_integer()).
 
+-spec(maybe_gc_large_msg/1 ::
+        (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer()).
+
 -endif.
 
 %%----------------------------------------------------------------------------
@@ -111,7 +120,7 @@ publish(X, Delivery) ->
 
 delivery(Mandatory, Confirm, Message, MsgSeqNo) ->
     #delivery{mandatory = Mandatory, confirm = Confirm, sender = self(),
-              message = Message, msg_seq_no = MsgSeqNo}.
+              message = Message, msg_seq_no = MsgSeqNo, flow = noflow}.
 
 build_content(Properties, BodyBin) when is_binary(BodyBin) ->
     build_content(Properties, [BodyBin]);
@@ -222,6 +231,19 @@ update_invalid(Name, Value, ExistingHdr, Header) ->
     NewHdr = rabbit_misc:set_table_value(ExistingHdr, Name, array, Values),
     set_invalid(NewHdr, Header).
 
+header(_Header, undefined) ->
+    undefined;
+header(_Header, []) ->
+    undefined;
+header(Header, Headers) ->
+    header(Header, Headers, undefined).
+
+header(Header, Headers, Default) ->
+    case lists:keysearch(Header, 1, Headers) of
+        false        -> Default;
+        {value, Val} -> Val
+    end.
+
 extract_headers(Content) ->
     #content{properties = #'P_basic'{headers = Headers}} =
         rabbit_binary_parser:ensure_content_decoded(Content),
@@ -276,5 +298,24 @@ parse_expiration(#'P_basic'{expiration = Expiration}) ->
             {error, {leftover_string, S}}
     end.
 
+%% Some processes (channel, writer) can get huge amounts of binary
+%% garbage when processing huge messages at high speed (since we only
+%% do enough reductions to GC every few hundred messages, and if each
+%% message is 1MB then that's ugly). So count how many bytes of
+%% message we have processed, and force a GC every so often.
+maybe_gc_large_msg(Content) ->
+    Size = msg_size(Content),
+    Current = case get(msg_size_for_gc) of
+                  undefined -> 0;
+                  C         -> C
+              end,
+    New = Current + Size,
+    put(msg_size_for_gc, case New > 1000000 of
+                             true  -> erlang:garbage_collect(),
+                                      0;
+                             false -> New
+                         end),
+    Size.
+
 msg_size(#content{payload_fragments_rev = PFR}) -> iolist_size(PFR);
 msg_size(#basic_message{content = Content})     -> msg_size(Content).
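The header/2 and header/3 helpers added above look a header up by name in an AMQP headers table of {Name, Type, Value} entries and return the whole matching entry (or the supplied default), while maybe_gc_large_msg/1 returns the message size and forces a garbage collection of the calling process once roughly a megabyte of payload has accumulated. A minimal sketch of the lookup behaviour, using hypothetical header values that are not part of this diff:

%% Hypothetical table for illustration only; note that header/2,3 return
%% the whole {Name, Type, Value} entry, not just the value.
Headers = [{<<"x-match">>,   longstr,   <<"all">>},
           {<<"x-retries">>, signedint, 3}],
{<<"x-match">>, longstr, <<"all">>} = rabbit_basic:header(<<"x-match">>, Headers),
undefined = rabbit_basic:header(<<"x-missing">>, Headers),
none      = rabbit_basic:header(<<"x-missing">>, Headers, none).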
index 53ba35dbee5fd0a607deef9912b37065fefde3d2..34f2d601aa20d0995f61eaed24175387438e90fe 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_binary_generator).
index 3ab82cad7852bc986336441a00aa8324da3640d9..8b3bf3e6f568841521dec054ed0437476ea2419f 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_binary_parser).
 %% parse_table supports the AMQP 0-8/0-9 standard types, S, I, D, T
 %% and F, as well as the QPid extensions b, d, f, l, s, t, x, and V.
 
+-define(SIMPLE_PARSE_TABLE(BType, Pattern, RType),
+        parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+                      BType, Pattern, Rest/binary>>) ->
+               [{NameString, RType, Value} | parse_table(Rest)]).
+
+%% Note that we try to put these in approximately the order we expect
+%% to hit them, that's why the empty binary is half way through.
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+              $S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{NameString, longstr, Value} | parse_table(Rest)];
+
+?SIMPLE_PARSE_TABLE($I, Value:32/signed,   signedint);
+?SIMPLE_PARSE_TABLE($T, Value:64/unsigned, timestamp);
+
 parse_table(<<>>) ->
     [];
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary, ValueAndRest/binary>>) ->
-    {Type, Value, Rest} = parse_field_value(ValueAndRest),
-    [{NameString, Type, Value} | parse_table(Rest)].
 
-parse_array(<<>>) ->
-    [];
-parse_array(<<ValueAndRest/binary>>) ->
-    {Type, Value, Rest} = parse_field_value(ValueAndRest),
-    [{Type, Value} | parse_array(Rest)].
+?SIMPLE_PARSE_TABLE($b, Value:8/signed,  byte);
+?SIMPLE_PARSE_TABLE($d, Value:64/float, double);
+?SIMPLE_PARSE_TABLE($f, Value:32/float, float);
+?SIMPLE_PARSE_TABLE($l, Value:64/signed, long);
+?SIMPLE_PARSE_TABLE($s, Value:16/signed, short);
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+              $t, Value:8/unsigned, Rest/binary>>) ->
+    [{NameString, bool, (Value /= 0)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+              $D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
+    [{NameString, decimal, {Before, After}} | parse_table(Rest)];
 
-parse_field_value(<<$S, VLen:32/unsigned, V:VLen/binary, R/binary>>) ->
-    {longstr, V, R};
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+              $F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{NameString, table, parse_table(Value)} | parse_table(Rest)];
 
-parse_field_value(<<$I, V:32/signed, R/binary>>) ->
-    {signedint, V, R};
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+              $A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{NameString, array, parse_array(Value)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+              $x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{NameString, binary, Value} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+              $V, Rest/binary>>) ->
+    [{NameString, void, undefined} | parse_table(Rest)].
+
+-define(SIMPLE_PARSE_ARRAY(BType, Pattern, RType),
+        parse_array(<<BType, Pattern, Rest/binary>>) ->
+               [{RType, Value} | parse_array(Rest)]).
+
+parse_array(<<$S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{longstr, Value} | parse_array(Rest)];
+
+?SIMPLE_PARSE_ARRAY($I, Value:32/signed,   signedint);
+?SIMPLE_PARSE_ARRAY($T, Value:64/unsigned, timestamp);
+
+parse_array(<<>>) ->
+    [];
 
-parse_field_value(<<$D, Before:8/unsigned, After:32/unsigned, R/binary>>) ->
-    {decimal, {Before, After}, R};
+?SIMPLE_PARSE_ARRAY($b, Value:8/signed,  byte);
+?SIMPLE_PARSE_ARRAY($d, Value:64/float, double);
+?SIMPLE_PARSE_ARRAY($f, Value:32/float, float);
+?SIMPLE_PARSE_ARRAY($l, Value:64/signed, long);
+?SIMPLE_PARSE_ARRAY($s, Value:16/signed, short);
 
-parse_field_value(<<$T, V:64/unsigned, R/binary>>) ->
-    {timestamp, V, R};
+parse_array(<<$t, Value:8/unsigned, Rest/binary>>) ->
+    [{bool, (Value /= 0)} | parse_array(Rest)];
 
-parse_field_value(<<$F, VLen:32/unsigned, Table:VLen/binary, R/binary>>) ->
-    {table, parse_table(Table), R};
+parse_array(<<$D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
+    [{decimal, {Before, After}} | parse_array(Rest)];
 
-parse_field_value(<<$A, VLen:32/unsigned, Array:VLen/binary, R/binary>>) ->
-    {array, parse_array(Array), R};
+parse_array(<<$F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{table, parse_table(Value)} | parse_array(Rest)];
 
-parse_field_value(<<$b, V:8/signed,   R/binary>>) -> {byte,        V, R};
-parse_field_value(<<$d, V:64/float,   R/binary>>) -> {double,      V, R};
-parse_field_value(<<$f, V:32/float,   R/binary>>) -> {float,       V, R};
-parse_field_value(<<$l, V:64/signed,  R/binary>>) -> {long,        V, R};
-parse_field_value(<<$s, V:16/signed,  R/binary>>) -> {short,       V, R};
-parse_field_value(<<$t, V:8/unsigned, R/binary>>) -> {bool, (V /= 0), R};
+parse_array(<<$A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{array, parse_array(Value)} | parse_array(Rest)];
 
-parse_field_value(<<$x, VLen:32/unsigned, V:VLen/binary, R/binary>>) ->
-    {binary, V, R};
+parse_array(<<$x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+    [{binary, Value} | parse_array(Rest)];
 
-parse_field_value(<<$V, R/binary>>) ->
-    {void, undefined, R}.
+parse_array(<<$V, Rest/binary>>) ->
+    [{void, undefined} | parse_array(Rest)].
 
 ensure_content_decoded(Content = #content{properties = Props})
   when Props =/= none ->
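The ?SIMPLE_PARSE_TABLE and ?SIMPLE_PARSE_ARRAY macros introduced above simply stamp out one function clause per fixed-width AMQP field type; each use expands to an ordinary clause. For example, the $I (signedint) table case above expands to:

parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
              $I, Value:32/signed, Rest/binary>>) ->
    [{NameString, signedint, Value} | parse_table(Rest)];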
index 7a095e068ba669a31b2c76c09e6f7f8c1007c4f3..77a9277c4a956a4a416c05ff8875f65c8947ddb0 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_binding).
@@ -25,7 +25,7 @@
 -export([info_keys/0, info/1, info/2, info_all/1, info_all/2]).
 %% these must all be run inside a mnesia tx
 -export([has_for_source/1, remove_for_source/1,
-         remove_for_destination/1, remove_transient_for_destination/1]).
+         remove_for_destination/2, remove_transient_for_destination/1]).
 
 %%----------------------------------------------------------------------------
 
@@ -52,7 +52,9 @@
                    rabbit_types:ok_or_error(rabbit_types:amqp_error()))).
 -type(bindings() :: [rabbit_types:binding()]).
 
--opaque(deletions() :: dict()).
+%% TODO this should really be opaque but that seems to confuse 17.1's
+%% dialyzer into objecting to everything that uses it.
+-type(deletions() :: dict:dict()).
 
 -spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) ->
                         'ok').
@@ -78,8 +80,8 @@
                    -> [rabbit_types:infos()]).
 -spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()).
 -spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()).
--spec(remove_for_destination/1 ::
-        (rabbit_types:binding_destination()) -> deletions()).
+-spec(remove_for_destination/2 ::
+        (rabbit_types:binding_destination(), boolean()) -> deletions()).
 -spec(remove_transient_for_destination/1 ::
         (rabbit_types:binding_destination()) -> deletions()).
 -spec(process_deletions/1 :: (deletions()) -> rabbit_misc:thunk('ok')).
@@ -215,7 +217,8 @@ remove(Binding, InnerFun) ->
 remove(Src, Dst, B) ->
     ok = sync_route(#route{binding = B}, durable(Src), durable(Dst),
                     fun mnesia:delete_object/3),
-    Deletions = maybe_auto_delete(B#binding.source, [B], new_deletions()),
+    Deletions = maybe_auto_delete(
+                  B#binding.source, [B], new_deletions(), false),
     process_deletions(Deletions).
 
 list(VHostPath) ->
@@ -298,11 +301,11 @@ remove_for_source(SrcName) ->
         mnesia:match_object(rabbit_route, Match, write) ++
             mnesia:match_object(rabbit_semi_durable_route, Match, write))).
 
-remove_for_destination(DstName) ->
-    remove_for_destination(DstName, fun remove_routes/1).
+remove_for_destination(DstName, OnlyDurable) ->
+    remove_for_destination(DstName, OnlyDurable, fun remove_routes/1).
 
 remove_transient_for_destination(DstName) ->
-    remove_for_destination(DstName, fun remove_transient_routes/1).
+    remove_for_destination(DstName, false, fun remove_transient_routes/1).
 
 %%----------------------------------------------------------------------------
 
@@ -362,7 +365,7 @@ not_found_or_absent_errs(Names) ->
 
 absent_errs_only(Names) ->
     Errs = [E || Name <- Names,
-                 {absent, _Q} = E <- [not_found_or_absent(Name)]],
+                 {absent, _Q, _Reason} = E <- [not_found_or_absent(Name)]],
     rabbit_misc:const(case Errs of
                           [] -> ok;
                           _  -> {error, {resources_missing, Errs}}
@@ -375,8 +378,8 @@ not_found_or_absent(#resource{kind = exchange} = Name) ->
     {not_found, Name};
 not_found_or_absent(#resource{kind = queue}    = Name) ->
     case rabbit_amqqueue:not_found_or_absent(Name) of
-        not_found        -> {not_found, Name};
-        {absent, _Q} = R -> R
+        not_found                 -> {not_found, Name};
+        {absent, _Q, _Reason} = R -> R
     end.
 
 contains(Table, MatchHead) ->
@@ -428,36 +431,50 @@ remove_transient_routes(Routes) ->
          R#route.binding
      end || R <- Routes].
 
-remove_for_destination(DstName, Fun) ->
+remove_for_destination(DstName, OnlyDurable, Fun) ->
     lock_route_tables(),
-    Match = reverse_route(
-              #route{binding = #binding{destination = DstName, _ = '_'}}),
-    Routes = [reverse_route(R) || R <- mnesia:match_object(
-                                         rabbit_reverse_route, Match, write)],
+    MatchFwd = #route{binding = #binding{destination = DstName, _ = '_'}},
+    MatchRev = reverse_route(MatchFwd),
+    Routes = case OnlyDurable of
+                 false -> [reverse_route(R) ||
+                              R <- mnesia:match_object(
+                                     rabbit_reverse_route, MatchRev, write)];
+                 true  -> lists:usort(
+                            mnesia:match_object(
+                              rabbit_durable_route, MatchFwd, write) ++
+                                mnesia:match_object(
+                                  rabbit_semi_durable_route, MatchFwd, write))
+             end,
     Bindings = Fun(Routes),
-    group_bindings_fold(fun maybe_auto_delete/3, new_deletions(),
-                        lists:keysort(#binding.source, Bindings)).
+    group_bindings_fold(fun maybe_auto_delete/4, new_deletions(),
+                        lists:keysort(#binding.source, Bindings), OnlyDurable).
 
 %% Requires that its input binding list is sorted in exchange-name
 %% order, so that the grouping of bindings (for passing to
 %% group_bindings_and_auto_delete1) works properly.
-group_bindings_fold(_Fun, Acc, []) ->
+group_bindings_fold(_Fun, Acc, [], _OnlyDurable) ->
     Acc;
-group_bindings_fold(Fun, Acc, [B = #binding{source = SrcName} | Bs]) ->
-    group_bindings_fold(Fun, SrcName, Acc, Bs, [B]).
+group_bindings_fold(Fun, Acc, [B = #binding{source = SrcName} | Bs],
+                    OnlyDurable) ->
+    group_bindings_fold(Fun, SrcName, Acc, Bs, [B], OnlyDurable).
 
 group_bindings_fold(
-  Fun, SrcName, Acc, [B = #binding{source = SrcName} | Bs], Bindings) ->
-    group_bindings_fold(Fun, SrcName, Acc, Bs, [B | Bindings]);
-group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings) ->
+  Fun, SrcName, Acc, [B = #binding{source = SrcName} | Bs], Bindings,
+  OnlyDurable) ->
+    group_bindings_fold(Fun, SrcName, Acc, Bs, [B | Bindings], OnlyDurable);
+group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings, OnlyDurable) ->
     %% Either Removed is [], or its head has a non-matching SrcName.
-    group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc), Removed).
+    group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc, OnlyDurable), Removed,
+                        OnlyDurable).
 
-maybe_auto_delete(XName, Bindings, Deletions) ->
+maybe_auto_delete(XName, Bindings, Deletions, OnlyDurable) ->
     {Entry, Deletions1} =
-        case mnesia:read({rabbit_exchange, XName}) of
+        case mnesia:read({case OnlyDurable of
+                              true  -> rabbit_durable_exchange;
+                              false -> rabbit_exchange
+                          end, XName}) of
             []  -> {{undefined, not_deleted, Bindings}, Deletions};
-            [X] -> case rabbit_exchange:maybe_auto_delete(X) of
+            [X] -> case rabbit_exchange:maybe_auto_delete(X, OnlyDurable) of
                        not_deleted ->
                            {{X, not_deleted, Bindings}, Deletions};
                        {deleted, Deletions2} ->
index 043ec7e363e9dea974cda28319dda0df74876a41..489f7b346971b783b978666f47cf53ec56be16fe 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_channel).
@@ -21,7 +21,8 @@
 -behaviour(gen_server2).
 
 -export([start_link/11, do/2, do/3, do_flow/3, flush/1, shutdown/1]).
--export([send_command/2, deliver/4, send_credit_reply/2, send_drained/2]).
+-export([send_command/2, deliver/4, deliver_reply/2,
+         send_credit_reply/2, send_drained/2]).
 -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]).
 -export([refresh_config_local/0, ready_for_close/1]).
 -export([force_event_refresh/1]).
@@ -30,7 +31,7 @@
          handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
          prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
 %% Internal
--export([list_local/0]).
+-export([list_local/0, deliver_reply_local/3]).
 
 -record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid,
              conn_name, limiter, tx, next_tag, unacked_message_q, user,
@@ -39,7 +40,9 @@
              queue_consumers, delivering_queues,
              queue_collector_pid, stats_timer, confirm_enabled, publish_seqno,
              unconfirmed, confirmed, mandatory, capabilities, trace_state,
-             consumer_prefetch}).
+             consumer_prefetch, reply_consumer,
+             %% flow | noflow, see rabbitmq-server#114
+             delivery_flow}).
 
 -define(MAX_PERMISSION_CACHE_SIZE, 12).
 
@@ -96,6 +99,9 @@
 -spec(deliver/4 ::
         (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg())
         -> 'ok').
+-spec(deliver_reply/2 :: (binary(), rabbit_types:delivery()) -> 'ok').
+-spec(deliver_reply_local/3 ::
+        (pid(), binary(), rabbit_types:delivery()) -> 'ok').
 -spec(send_credit_reply/2 :: (pid(), non_neg_integer()) -> 'ok').
 -spec(send_drained/2 :: (pid(), [{rabbit_types:ctag(), non_neg_integer()}])
                         -> 'ok').
@@ -142,6 +148,45 @@ send_command(Pid, Msg) ->
 deliver(Pid, ConsumerTag, AckRequired, Msg) ->
     gen_server2:cast(Pid, {deliver, ConsumerTag, AckRequired, Msg}).
 
+deliver_reply(<<"amq.rabbitmq.reply-to.", Rest/binary>>, Delivery) ->
+    case decode_fast_reply_to(Rest) of
+        {ok, Pid, Key} ->
+            delegate:invoke_no_result(
+              Pid, {?MODULE, deliver_reply_local, [Key, Delivery]});
+        error ->
+            ok
+    end.
+
+%% We want to ensure people can't use this mechanism to send a message
+%% to an arbitrary process and kill it!
+deliver_reply_local(Pid, Key, Delivery) ->
+    case pg_local:in_group(rabbit_channels, Pid) of
+        true  -> gen_server2:cast(Pid, {deliver_reply, Key, Delivery});
+        false -> ok
+    end.
+
+declare_fast_reply_to(<<"amq.rabbitmq.reply-to">>) ->
+    exists;
+declare_fast_reply_to(<<"amq.rabbitmq.reply-to.", Rest/binary>>) ->
+    case decode_fast_reply_to(Rest) of
+        {ok, Pid, Key} ->
+            Msg = {declare_fast_reply_to, Key},
+            rabbit_misc:with_exit_handler(
+              rabbit_misc:const(not_found),
+              fun() -> gen_server2:call(Pid, Msg, infinity) end);
+        error ->
+            not_found
+    end;
+declare_fast_reply_to(_) ->
+    not_found.
+
+decode_fast_reply_to(Rest) ->
+    case string:tokens(binary_to_list(Rest), ".") of
+        [PidEnc, Key] -> Pid = binary_to_term(base64:decode(PidEnc)),
+                         {ok, Pid, Key};
+        _             -> error
+    end.
+
 send_credit_reply(Pid, Len) ->
     gen_server2:cast(Pid, {send_credit_reply, Len}).
 
@@ -192,6 +237,10 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
     process_flag(trap_exit, true),
     ?store_proc_name({ConnName, Channel}),
     ok = pg_local:join(rabbit_channels, self()),
+    Flow = case rabbit_misc:get_env(rabbit, mirroring_flow_control, true) of
+             true   -> flow;
+             false  -> noflow
+           end,
     State = #ch{state                   = starting,
                 protocol                = Protocol,
                 channel                 = Channel,
@@ -219,7 +268,9 @@ init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
                 mandatory               = dtree:empty(),
                 capabilities            = Capabilities,
                 trace_state             = rabbit_trace:init(VHost),
-                consumer_prefetch       = 0},
+                consumer_prefetch       = 0,
+                reply_consumer          = none,
+                delivery_flow           = Flow},
     State1 = rabbit_event:init_stats_timer(State, #ch.stats_timer),
     rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State1)),
     rabbit_event:if_enabled(State1, #ch.stats_timer,
@@ -262,6 +313,13 @@ handle_call({info, Items}, _From, State) ->
 handle_call(refresh_config, _From, State = #ch{virtual_host = VHost}) ->
     reply(ok, State#ch{trace_state = rabbit_trace:init(VHost)});
 
+handle_call({declare_fast_reply_to, Key}, _From,
+            State = #ch{reply_consumer = Consumer}) ->
+    reply(case Consumer of
+              {_, _, Key} -> exists;
+              _           -> not_found
+          end, State);
+
 handle_call(_Request, _From, State) ->
     noreply(State).
 
@@ -324,8 +382,32 @@ handle_cast({deliver, ConsumerTag, AckRequired,
                             exchange     = ExchangeName#resource.name,
                             routing_key  = RoutingKey},
            Content),
+    rabbit_basic:maybe_gc_large_msg(Content),
     noreply(record_sent(ConsumerTag, AckRequired, Msg, State));
 
+handle_cast({deliver_reply, _K, _Del}, State = #ch{state = closing}) ->
+    noreply(State);
+handle_cast({deliver_reply, _K, _Del}, State = #ch{reply_consumer = none}) ->
+    noreply(State);
+handle_cast({deliver_reply, Key, #delivery{message =
+                    #basic_message{exchange_name = ExchangeName,
+                                   routing_keys  = [RoutingKey | _CcRoutes],
+                                   content       = Content}}},
+            State = #ch{writer_pid     = WriterPid,
+                        next_tag       = DeliveryTag,
+                        reply_consumer = {ConsumerTag, _Suffix, Key}}) ->
+    ok = rabbit_writer:send_command(
+           WriterPid,
+           #'basic.deliver'{consumer_tag = ConsumerTag,
+                            delivery_tag = DeliveryTag,
+                            redelivered  = false,
+                            exchange     = ExchangeName#resource.name,
+                            routing_key  = RoutingKey},
+           Content),
+    noreply(State);
+handle_cast({deliver_reply, _K1, _}, State=#ch{reply_consumer = {_, _, _K2}}) ->
+    noreply(State);
+
 handle_cast({send_credit_reply, Len}, State = #ch{writer_pid = WriterPid}) ->
     ok = rabbit_writer:send_command(
            WriterPid, #'basic.credit_ok'{available = Len}),
@@ -341,7 +423,7 @@ handle_cast({send_drained, CTagCredit}, State = #ch{writer_pid = WriterPid}) ->
 handle_cast({force_event_refresh, Ref}, State) ->
     rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State),
                         Ref),
-    noreply(State);
+    noreply(rabbit_event:init_stats_timer(State, #ch.stats_timer));
 
 handle_cast({mandatory_received, MsgSeqNo}, State = #ch{mandatory = Mand}) ->
     %% NB: don't call noreply/1 since we don't want to send confirms.
@@ -409,6 +491,8 @@ format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
 
 %%---------------------------------------------------------------------------
 
+log(Level, Fmt, Args) -> rabbit_log:log(channel, Level, Fmt, Args).
+
 reply(Reply, NewState) -> {reply, Reply, next_state(NewState), hibernate}.
 
 noreply(NewState) -> {noreply, next_state(NewState), hibernate}.
@@ -433,17 +517,22 @@ send(_Command, #ch{state = closing}) ->
 send(Command, #ch{writer_pid = WriterPid}) ->
     ok = rabbit_writer:send_command(WriterPid, Command).
 
-handle_exception(Reason, State = #ch{protocol   = Protocol,
-                                     channel    = Channel,
-                                     writer_pid = WriterPid,
-                                     reader_pid = ReaderPid,
-                                     conn_pid   = ConnPid}) ->
+handle_exception(Reason, State = #ch{protocol     = Protocol,
+                                     channel      = Channel,
+                                     writer_pid   = WriterPid,
+                                     reader_pid   = ReaderPid,
+                                     conn_pid     = ConnPid,
+                                     conn_name    = ConnName,
+                                     virtual_host = VHost,
+                                     user         = User}) ->
     %% something bad's happened: notify_queues may not be 'ok'
     {_Result, State1} = notify_queues(State),
     case rabbit_binary_generator:map_exception(Channel, Reason, Protocol) of
         {Channel, CloseMethod} ->
-            rabbit_log:error("connection ~p, channel ~p - soft error:~n~p~n",
-                             [ConnPid, Channel, Reason]),
+            log(error, "Channel error on connection ~p (~s, vhost: '~s',"
+                       " user: '~s'), channel ~p:~n~p~n",
+                       [ConnPid, ConnName, VHost, User#user.username,
+                        Channel, Reason]),
             ok = rabbit_writer:send_command(WriterPid, CloseMethod),
             {noreply, State1};
         {0, _} ->
@@ -501,7 +590,8 @@ check_user_id_header(#'P_basic'{user_id = Username},
                      #ch{user = #user{username = Username}}) ->
     ok;
 check_user_id_header(
-  #'P_basic'{}, #ch{user = #user{auth_backend = rabbit_auth_backend_dummy}}) ->
+  #'P_basic'{}, #ch{user = #user{authz_backends =
+                                     [{rabbit_auth_backend_dummy, _}]}}) ->
     ok;
 check_user_id_header(#'P_basic'{user_id = Claimed},
                      #ch{user = #user{username = Actual,
@@ -528,7 +618,7 @@ check_internal_exchange(_) ->
     ok.
 
 check_msg_size(Content) ->
-    Size = rabbit_basic:msg_size(Content),
+    Size = rabbit_basic:maybe_gc_large_msg(Content),
     case Size > ?MAX_MSG_SIZE of
         true  -> precondition_failed("message size ~B larger than max size ~B",
                                      [Size, ?MAX_MSG_SIZE]);
@@ -580,7 +670,7 @@ check_not_default_exchange(#resource{kind = exchange, name = <<"">>}) ->
 check_not_default_exchange(_) ->
     ok.
 
-check_exchange_deletion(XName = #resource{name = <<"amq.rabbitmq.", _/binary>>,
+check_exchange_deletion(XName = #resource{name = <<"amq.", _/binary>>,
                                           kind = exchange}) ->
     rabbit_misc:protocol_error(
       access_refused, "deletion of system ~s not allowed",
@@ -604,6 +694,21 @@ check_name(Kind, NameBin = <<"amq.", _/binary>>) ->
 check_name(_Kind, NameBin) ->
     NameBin.
 
+maybe_set_fast_reply_to(
+  C = #content{properties = P = #'P_basic'{reply_to =
+                                               <<"amq.rabbitmq.reply-to">>}},
+  #ch{reply_consumer = ReplyConsumer}) ->
+    case ReplyConsumer of
+        none         -> rabbit_misc:protocol_error(
+                          precondition_failed,
+                          "fast reply consumer does not exist", []);
+        {_, Suf, _K} -> Rep = <<"amq.rabbitmq.reply-to.", Suf/binary>>,
+                        rabbit_binary_generator:clear_encoded_content(
+                          C#content{properties = P#'P_basic'{reply_to = Rep}})
+    end;
+maybe_set_fast_reply_to(C, _State) ->
+    C.
+
 record_confirms([], State) ->
     State;
 record_confirms(MXs, State = #ch{confirmed = C}) ->
@@ -668,8 +773,12 @@ handle_method(#'basic.publish'{exchange    = ExchangeNameBin,
                                mandatory   = Mandatory},
               Content, State = #ch{virtual_host    = VHostPath,
                                    tx              = Tx,
+                                   channel         = ChannelNum,
                                    confirm_enabled = ConfirmEnabled,
-                                   trace_state     = TraceState}) ->
+                                   trace_state     = TraceState,
+                                   user            = #user{username = Username},
+                                   conn_name       = ConnName,
+                                   delivery_flow   = Flow}) ->
     check_msg_size(Content),
     ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
     check_write_permitted(ExchangeName, State),
@@ -678,7 +787,8 @@ handle_method(#'basic.publish'{exchange    = ExchangeNameBin,
     %% We decode the content's properties here because we're almost
     %% certain to want to look at delivery-mode and priority.
     DecodedContent = #content {properties = Props} =
-        rabbit_binary_parser:ensure_content_decoded(Content),
+        maybe_set_fast_reply_to(
+          rabbit_binary_parser:ensure_content_decoded(Content), State),
     check_user_id_header(Props, State),
     check_expiration_header(Props),
     DoConfirm = Tx =/= none orelse ConfirmEnabled,
@@ -690,11 +800,12 @@ handle_method(#'basic.publish'{exchange    = ExchangeNameBin,
         end,
     case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of
         {ok, Message} ->
-            rabbit_trace:tap_in(Message, TraceState),
             Delivery = rabbit_basic:delivery(
                          Mandatory, DoConfirm, Message, MsgSeqNo),
             QNames = rabbit_exchange:route(Exchange, Delivery),
-            DQ = {Delivery, QNames},
+            rabbit_trace:tap_in(Message, QNames, ConnName, ChannelNum,
+                                Username, TraceState),
+            DQ = {Delivery#delivery{flow = Flow}, QNames},
             {noreply, case Tx of
                           none         -> deliver_to_queues(DQ, State1);
                           {Msgs, Acks} -> Msgs1 = queue:in(DQ, Msgs),
@@ -752,6 +863,56 @@ handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck},
             {reply, #'basic.get_empty'{}, State}
     end;
 
+handle_method(#'basic.consume'{queue        = <<"amq.rabbitmq.reply-to">>,
+                               consumer_tag = CTag0,
+                               no_ack       = NoAck,
+                               nowait       = NoWait},
+              _, State = #ch{reply_consumer   = ReplyConsumer,
+                             consumer_mapping = ConsumerMapping}) ->
+    case dict:find(CTag0, ConsumerMapping) of
+        error ->
+            case {ReplyConsumer, NoAck} of
+                {none, true} ->
+                    CTag = case CTag0 of
+                               <<>>  -> rabbit_guid:binary(
+                                          rabbit_guid:gen_secure(), "amq.ctag");
+                               Other -> Other
+                           end,
+                    %% Precalculate both suffix and key; base64 encoding is
+                    %% expensive
+                    Key = base64:encode(rabbit_guid:gen_secure()),
+                    PidEnc = base64:encode(term_to_binary(self())),
+                    Suffix = <<PidEnc/binary, ".", Key/binary>>,
+                    Consumer = {CTag, Suffix, binary_to_list(Key)},
+                    State1 = State#ch{reply_consumer = Consumer},
+                    case NoWait of
+                        true  -> {noreply, State1};
+                        false -> Rep = #'basic.consume_ok'{consumer_tag = CTag},
+                                 {reply, Rep, State1}
+                    end;
+                {_, false} ->
+                    rabbit_misc:protocol_error(
+                      precondition_failed,
+                      "reply consumer cannot acknowledge", []);
+                _ ->
+                    rabbit_misc:protocol_error(
+                      precondition_failed, "reply consumer already set", [])
+            end;
+        {ok, _} ->
+            %% Attempted reuse of consumer tag.
+            rabbit_misc:protocol_error(
+              not_allowed, "attempt to reuse consumer tag '~s'", [CTag0])
+    end;
+
+handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
+              _, State = #ch{reply_consumer = {ConsumerTag, _, _}}) ->
+    State1 = State#ch{reply_consumer = none},
+    case NoWait of
+        true  -> {noreply, State1};
+        false -> Rep = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
+                 {reply, Rep, State1}
+    end;
+
 handle_method(#'basic.consume'{queue        = QueueNameBin,
                                consumer_tag = ConsumerTag,
                                no_local     = _, % FIXME: implement
@@ -966,6 +1127,18 @@ handle_method(#'exchange.unbind'{destination = DestinationNameBin,
                    SourceNameBin, exchange, DestinationNameBin, RoutingKey,
                    Arguments, #'exchange.unbind_ok'{}, NoWait, State);
 
+%% Note that all declares to these are effectively passive. If it
+%% exists it by definition has one consumer.
+handle_method(#'queue.declare'{queue   = <<"amq.rabbitmq.reply-to",
+                                           _/binary>> = QueueNameBin,
+                               nowait  = NoWait}, _,
+              State = #ch{virtual_host = VHost}) ->
+    QueueName = rabbit_misc:r(VHost, queue, QueueNameBin),
+    case declare_fast_reply_to(QueueNameBin) of
+        exists    -> return_queue_declare_ok(QueueName, NoWait, 0, 1, State);
+        not_found -> rabbit_misc:not_found(QueueName)
+    end;
+
 handle_method(#'queue.declare'{queue       = QueueNameBin,
                                passive     = false,
                                durable     = DurableDeclare,
@@ -992,7 +1165,7 @@ handle_method(#'queue.declare'{queue       = QueueNameBin,
            QueueName,
            fun (Q) -> ok = rabbit_amqqueue:assert_equivalence(
                              Q, Durable, AutoDelete, Args, Owner),
-                      rabbit_amqqueue:stat(Q)
+                      maybe_stat(NoWait, Q)
            end) of
         {ok, MessageCount, ConsumerCount} ->
             return_queue_declare_ok(QueueName, NoWait, MessageCount,
@@ -1028,16 +1201,16 @@ handle_method(#'queue.declare'{queue       = QueueNameBin,
                     %% must have been created between the stat and the
                     %% declare. Loop around again.
                     handle_method(Declare, none, State);
-                {absent, Q} ->
-                    rabbit_misc:absent(Q);
+                {absent, Q, Reason} ->
+                    rabbit_misc:absent(Q, Reason);
                 {owner_died, _Q} ->
                     %% Presumably our own days are numbered since the
                     %% connection has died. Pretend the queue exists though,
                     %% just so nothing fails.
                     return_queue_declare_ok(QueueName, NoWait, 0, 0, State)
             end;
-        {error, {absent, Q}} ->
-            rabbit_misc:absent(Q)
+        {error, {absent, Q, Reason}} ->
+            rabbit_misc:absent(Q, Reason)
     end;
 
 handle_method(#'queue.declare'{queue   = QueueNameBin,
@@ -1048,7 +1221,7 @@ handle_method(#'queue.declare'{queue   = QueueNameBin,
     QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin),
     {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} =
         rabbit_amqqueue:with_or_die(
-          QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end),
+          QueueName, fun (Q) -> {maybe_stat(NoWait, Q), Q} end),
     ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
     return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount,
                             State);
@@ -1066,8 +1239,10 @@ handle_method(#'queue.delete'{queue     = QueueNameBin,
                    rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
                    rabbit_amqqueue:delete(Q, IfUnused, IfEmpty)
            end,
-           fun (not_found)   -> {ok, 0};
-               ({absent, Q}) -> rabbit_misc:absent(Q)
+           fun (not_found)            -> {ok, 0};
+               ({absent, Q, crashed}) -> rabbit_amqqueue:delete_crashed(Q),
+                                         {ok, 0};
+               ({absent, Q, Reason})  -> rabbit_misc:absent(Q, Reason)
            end) of
         {error, in_use} ->
             precondition_failed("~s in use", [rabbit_misc:rs(QueueName)]);
@@ -1204,6 +1379,9 @@ basic_consume(QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
             E
     end.
 
+maybe_stat(false, Q) -> rabbit_amqqueue:stat(Q);
+maybe_stat(true, _Q) -> {ok, 0, 0}.
+
 consumer_monitor(ConsumerTag,
                  State = #ch{consumer_mapping = ConsumerMapping,
                              queue_monitors   = QMons,
@@ -1313,8 +1491,8 @@ binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin,
              end) of
         {error, {resources_missing, [{not_found, Name} | _]}} ->
             rabbit_misc:not_found(Name);
-        {error, {resources_missing, [{absent, Q} | _]}} ->
-            rabbit_misc:absent(Q);
+        {error, {resources_missing, [{absent, Q, Reason} | _]}} ->
+            rabbit_misc:absent(Q, Reason);
         {error, binding_not_found} ->
             rabbit_misc:protocol_error(
               not_found, "no binding ~s between ~s and ~s",
@@ -1365,7 +1543,10 @@ record_sent(ConsumerTag, AckRequired,
             Msg = {QName, QPid, MsgId, Redelivered, _Message},
             State = #ch{unacked_message_q = UAMQ,
                         next_tag          = DeliveryTag,
-                        trace_state       = TraceState}) ->
+                        trace_state       = TraceState,
+                        user              = #user{username = Username},
+                        conn_name         = ConnName,
+                        channel           = ChannelNum}) ->
     ?INCR_STATS([{queue_stats, QName, 1}], case {ConsumerTag, AckRequired} of
                                                {none,  true} -> get;
                                                {none, false} -> get_no_ack;
@@ -1376,7 +1557,7 @@ record_sent(ConsumerTag, AckRequired,
         true  -> ?INCR_STATS([{queue_stats, QName, 1}], redeliver, State);
         false -> ok
     end,
-    rabbit_trace:tap_out(Msg, TraceState),
+    rabbit_trace:tap_out(Msg, ConnName, ChannelNum, Username, TraceState),
     UAMQ1 = case AckRequired of
                 true  -> queue:in({DeliveryTag, ConsumerTag, {QPid, MsgId}},
                                   UAMQ);
@@ -1495,7 +1676,7 @@ deliver_to_queues({Delivery = #delivery{message    = Message = #basic_message{
                    DelQNames}, State = #ch{queue_names    = QNames,
                                            queue_monitors = QMons}) ->
     Qs = rabbit_amqqueue:lookup(DelQNames),
-    DeliveredQPids = rabbit_amqqueue:deliver_flow(Qs, Delivery),
+    DeliveredQPids = rabbit_amqqueue:deliver(Qs, Delivery),
     %% The pmon:monitor_all/2 monitors all queues to which we
     %% delivered. But we want to monitor even queues we didn't deliver
     %% to, since we need their 'DOWN' messages to clean
@@ -1565,7 +1746,7 @@ send_nacks(_, State) ->
 send_confirms(State = #ch{tx = none, confirmed = []}) ->
     State;
 send_confirms(State = #ch{tx = none, confirmed = C}) ->
-    case rabbit_node_monitor:pause_minority_guard() of
+    case rabbit_node_monitor:pause_partition_guard() of
         ok      -> MsgSeqNos =
                        lists:foldl(
                          fun ({MsgSeqNo, XName}, MSNs) ->
@@ -1577,7 +1758,7 @@ send_confirms(State = #ch{tx = none, confirmed = C}) ->
         pausing -> State
     end;
 send_confirms(State) ->
-    case rabbit_node_monitor:pause_minority_guard() of
+    case rabbit_node_monitor:pause_partition_guard() of
         ok      -> maybe_complete_tx(State);
         pausing -> State
     end.
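The amq.rabbitmq.reply-to support added to this module implements direct reply-to: basic.consume on the pseudo-queue registers an in-memory reply consumer on the channel, basic.publish rewrites the reply_to property to amq.rabbitmq.reply-to.<encoded channel pid>.<key>, and deliver_reply/2 later casts the response straight back to that channel without any real queue. A hedged client-side sketch of the resulting RPC pattern, assuming the Erlang amqp_client library and a hypothetical rpc_queue (none of this is taken from the diff):

-module(reply_to_example).
-include_lib("amqp_client/include/amqp_client.hrl").
-export([rpc_request/1]).

%% Conn is a connection pid obtained from amqp_connection:start/1.
rpc_request(Conn) ->
    {ok, Ch} = amqp_connection:open_channel(Conn),
    %% Consuming from the pseudo-queue (no_ack must be true, per the
    %% precondition checks above) registers this channel's reply consumer.
    #'basic.consume_ok'{} =
        amqp_channel:subscribe(
          Ch, #'basic.consume'{queue = <<"amq.rabbitmq.reply-to">>, no_ack = true},
          self()),
    %% The broker expands reply_to to amq.rabbitmq.reply-to.<pid>.<key>
    %% before the RPC server sees the request.
    amqp_channel:cast(
      Ch, #'basic.publish'{routing_key = <<"rpc_queue">>},
      #amqp_msg{props   = #'P_basic'{reply_to = <<"amq.rabbitmq.reply-to">>},
                payload = <<"request">>}),
    receive
        {#'basic.deliver'{}, #amqp_msg{payload = Reply}} -> Reply
    after 5000 ->
        timeout
    end.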
index 81c17fbfbe223542656d80ae4570950aaaedfb91..25c5df8a7b27a38b88f6a20e3c213fc9ec6d7f4a 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% Since the AMQP methods used here are queue related,
@@ -33,7 +33,7 @@
 -callback description() -> [proplists:property()].
 
 -callback intercept(original_method(), rabbit_types:vhost()) ->
-    rabbit_types:ok_or_error2(processed_method(), any()).
+    processed_method() | rabbit_misc:channel_or_connection_exit().
 
 %% Whether the interceptor wishes to intercept the amqp method
 -callback applies_to(intercept_method()) -> boolean().
@@ -62,20 +62,15 @@ intercept_method(M, VHost) ->
 intercept_method(M, _VHost, []) ->
     M;
 intercept_method(M, VHost, [I]) ->
-    case I:intercept(M, VHost) of
-        {ok, M2} ->
-            case validate_method(M, M2) of
-                true ->
-                    M2;
-                _   ->
-                    internal_error("Interceptor: ~p expected "
-                                   "to return method: ~p but returned: ~p",
-                                   [I, rabbit_misc:method_record_type(M),
-                                       rabbit_misc:method_record_type(M2)])
-            end;
-        {error, Reason} ->
-            internal_error("Interceptor: ~p failed with reason: ~p",
-                           [I, Reason])
+    M2 = I:intercept(M, VHost),
+    case validate_method(M, M2) of
+        true ->
+            M2;
+        _   ->
+            internal_error("Interceptor: ~p expected "
+                                "to return method: ~p but returned: ~p",
+                                [I, rabbit_misc:method_record_type(M),
+                                 rabbit_misc:method_record_type(M2)])
     end;
 intercept_method(M, _VHost, Is) ->
     internal_error("More than one interceptor for method: ~p -- ~p",
index 448d17a29443143d1db73595745c727e919a6518..e8f45f7305a58421e38c719135efa6fa0c12a261 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_channel_sup).
index d0e82548967ee75ee497d4c60176a3173bff629b..2be2af91a7ae039a1f1ea25f33c348a8385df4ee 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_channel_sup_sup).
diff --git a/rabbitmq-server/src/rabbit_cli.erl b/rabbitmq-server/src/rabbit_cli.erl
new file mode 100644 (file)
index 0000000..33098ce
--- /dev/null
@@ -0,0 +1,225 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_cli).
+-include("rabbit_cli.hrl").
+
+-export([main/3, start_distribution/0, start_distribution/1,
+         parse_arguments/4, rpc_call/4, rpc_call/5]).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-type(optdef() :: flag | {option, string()}).
+-type(parse_result() :: {'ok', {atom(), [{string(), string()}], [string()]}} |
+                        'no_command').
+
+
+-spec(main/3 :: (fun (([string()], string()) -> parse_result()),
+                     fun ((atom(), atom(), [any()], [any()]) -> any()),
+                         atom()) -> no_return()).
+-spec(start_distribution/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(start_distribution/1 :: (string()) -> {'ok', pid()} | {'error', any()}).
+-spec(usage/1 :: (atom()) -> no_return()).
+-spec(parse_arguments/4 ::
+        ([{atom(), [{string(), optdef()}]} | atom()],
+         [{string(), optdef()}], string(), [string()]) -> parse_result()).
+-spec(rpc_call/4 :: (node(), atom(), atom(), [any()]) -> any()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+main(ParseFun, DoFun, UsageMod) ->
+    error_logger:tty(false),
+    start_distribution(),
+    {ok, [[NodeStr|_]|_]} = init:get_argument(nodename),
+    {Command, Opts, Args} =
+        case ParseFun(init:get_plain_arguments(), NodeStr) of
+            {ok, Res}  -> Res;
+            no_command -> print_error("could not recognise command", []),
+                          usage(UsageMod)
+        end,
+    Node = proplists:get_value(?NODE_OPT, Opts),
+    PrintInvalidCommandError =
+        fun () ->
+                print_error("invalid command '~s'",
+                            [string:join([atom_to_list(Command) | Args], " ")])
+        end,
+
+    %% The reason we don't use a try/catch here is that rpc:call turns
+    %% thrown errors into normal return values
+    case catch DoFun(Command, Node, Args, Opts) of
+        ok ->
+            rabbit_misc:quit(0);
+        {'EXIT', {function_clause, [{?MODULE, action, _}    | _]}} -> %% < R15
+            PrintInvalidCommandError(),
+            usage(UsageMod);
+        {'EXIT', {function_clause, [{?MODULE, action, _, _} | _]}} -> %% >= R15
+            PrintInvalidCommandError(),
+            usage(UsageMod);
+        {error, {missing_dependencies, Missing, Blame}} ->
+            print_error("dependent plugins ~p not found; used by ~p.",
+                        [Missing, Blame]),
+            rabbit_misc:quit(2);
+        {'EXIT', {badarg, _}} ->
+            print_error("invalid parameter: ~p", [Args]),
+            usage(UsageMod);
+        {error, {Problem, Reason}} when is_atom(Problem), is_binary(Reason) ->
+            %% We handle this common case specially to avoid ~p since
+            %% that has i18n issues
+            print_error("~s: ~s", [Problem, Reason]),
+            rabbit_misc:quit(2);
+        {error, Reason} ->
+            print_error("~p", [Reason]),
+            rabbit_misc:quit(2);
+        {error_string, Reason} ->
+            print_error("~s", [Reason]),
+            rabbit_misc:quit(2);
+        {badrpc, {'EXIT', Reason}} ->
+            print_error("~p", [Reason]),
+            rabbit_misc:quit(2);
+        {badrpc, Reason} ->
+            case Reason of
+                timeout ->
+                    print_error("operation ~w on node ~w timed out", [Command, Node]);
+                _ ->
+                    print_error("unable to connect to node ~w: ~w", [Node, Reason]),
+                    print_badrpc_diagnostics([Node])
+            end,
+            rabbit_misc:quit(2);
+        {badrpc_multi, Reason, Nodes} ->
+            print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]),
+            print_badrpc_diagnostics(Nodes),
+            rabbit_misc:quit(2);
+        Other ->
+            print_error("~p", [Other]),
+            rabbit_misc:quit(2)
+    end.
+
+start_distribution() ->
+    start_distribution(list_to_atom(
+                         rabbit_misc:format("rabbitmq-cli-~s", [os:getpid()]))).
+
+start_distribution(Name) ->
+    rabbit_nodes:ensure_epmd(),
+    net_kernel:start([Name, name_type()]).
+
+name_type() ->
+    case os:getenv("RABBITMQ_USE_LONGNAME") of
+        "true" -> longnames;
+        _      -> shortnames
+    end.
+
+usage(Mod) ->
+    io:format("~s", [Mod:usage()]),
+    rabbit_misc:quit(1).
+
+%%----------------------------------------------------------------------------
+
+parse_arguments(Commands, GlobalDefs, NodeOpt, CmdLine) ->
+    case parse_arguments(Commands, GlobalDefs, CmdLine) of
+        {ok, {Cmd, Opts0, Args}} ->
+            Opts = [case K of
+                        NodeOpt -> {NodeOpt, rabbit_nodes:make(V)};
+                        _       -> {K, V}
+                    end || {K, V} <- Opts0],
+            {ok, {Cmd, Opts, Args}};
+        E ->
+            E
+    end.
+
+%% Takes:
+%%    * A list of [{atom(), [{string(), optdef()]} | atom()], where the atom()s
+%%      are the accepted commands and the optional [string()] is the list of
+%%      accepted options for that command
+%%    * A list [{string(), optdef()}] of options valid for all commands
+%%    * The list of arguments given by the user
+%%
+%% Returns either {ok, {atom(), [{string(), string()}], [string()]} which are
+%% respectively the command, the key-value pairs of the options and the leftover
+%% arguments; or no_command if no command could be parsed.
+parse_arguments(Commands, GlobalDefs, As) ->
+    lists:foldl(maybe_process_opts(GlobalDefs, As), no_command, Commands).
+
+maybe_process_opts(GDefs, As) ->
+    fun({C, Os}, no_command) ->
+            process_opts(atom_to_list(C), dict:from_list(GDefs ++ Os), As);
+       (C, no_command) ->
+            (maybe_process_opts(GDefs, As))({C, []}, no_command);
+       (_, {ok, Res}) ->
+            {ok, Res}
+    end.
+
+process_opts(C, Defs, As0) ->
+    KVs0 = dict:map(fun (_, flag)        -> false;
+                        (_, {option, V}) -> V
+                    end, Defs),
+    process_opts(Defs, C, As0, not_found, KVs0, []).
+
+%% Consume flags/options until you find the correct command. If there are no
+%% arguments or the first argument is not the command we're expecting, fail.
+%% Arguments to this are: definitions, cmd we're looking for, args we
+%% haven't parsed, whether we have found the cmd, options we've found,
+%% plain args we've found.
+process_opts(_Defs, C, [], found, KVs, Outs) ->
+    {ok, {list_to_atom(C), dict:to_list(KVs), lists:reverse(Outs)}};
+process_opts(_Defs, _C, [], not_found, _, _) ->
+    no_command;
+process_opts(Defs, C, [A | As], Found, KVs, Outs) ->
+    OptType = case dict:find(A, Defs) of
+                  error             -> none;
+                  {ok, flag}        -> flag;
+                  {ok, {option, _}} -> option
+              end,
+    case {OptType, C, Found} of
+        {flag, _, _}     -> process_opts(
+                              Defs, C, As, Found, dict:store(A, true, KVs),
+                              Outs);
+        {option, _, _}   -> case As of
+                                []        -> no_command;
+                                [V | As1] -> process_opts(
+                                               Defs, C, As1, Found,
+                                               dict:store(A, V, KVs), Outs)
+                            end;
+        {none, A, _}     -> process_opts(Defs, C, As, found, KVs, Outs);
+        {none, _, found} -> process_opts(Defs, C, As, found, KVs, [A | Outs]);
+        {none, _, _}     -> no_command
+    end.
+
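+%% Worked example of the parsing above (hypothetical command table and node
+%% name; the order of the returned options is not significant, since they come
+%% from a dict):
+%%
+%%   parse_arguments([add_user, {set_policy, [{"--priority", {option, "0"}}]}],
+%%                   [{"-q", flag}, {"-n", {option, "rabbit@myhost"}}],
+%%                   "-n",
+%%                   ["-q", "add_user", "guest", "guest"])
+%%
+%% yields {ok, {add_user, Opts, ["guest", "guest"]}} where Opts contains
+%% {"-q", true} and {"-n", 'rabbit@myhost'} (the node option having been run
+%% through rabbit_nodes:make/1); an unrecognised command yields no_command.
+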
+%%----------------------------------------------------------------------------
+
+fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args).
+
+print_error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args).
+
+print_badrpc_diagnostics(Nodes) ->
+    fmt_stderr(rabbit_nodes:diagnostics(Nodes), []).
+
+%% If the server we are talking to has non-standard net_ticktime, and
+%% our connection lasts a while, we could get disconnected because of
+%% a timeout unless we set our ticktime to be the same. So let's do
+%% that.
+rpc_call(Node, Mod, Fun, Args) ->
+    rpc_call(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
+
+rpc_call(Node, Mod, Fun, Args, Timeout) ->
+    case rpc:call(Node, net_kernel, get_net_ticktime, [], Timeout) of
+        {badrpc, _} = E -> E;
+        Time            -> net_kernel:set_net_ticktime(Time, 0),
+                           rpc:call(Node, Mod, Fun, Args, Timeout)
+    end.
index dcf8c9e226e867f4c9c3f4c78da8dbec9e76c20a..5348d012d583197671ee1c50172e4e2146f99c87 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_client_sup).
index 20397cc036fef3dc204ed69f6e9364da973b5da7..f93b85b122a0b685a8e3516b5870882567de3e2d 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_command_assembler).
index 85266bd6ba5308b39d8e5416fadb2f63f760311f..d3c05ee4161a68e8f16fe3f0c4754142d39d27c4 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_connection_helper_sup).
index 1dfdadaed7660d155468d71131797ba922f6e6d0..982608556aba9a592615e121cc6f1641ea1dec39 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_connection_sup).
index 451f4d70d02dbb35c60d969fa1ba1ac619ba0f7d..fe0563bbc7c9563ffb12c8d6e70488349099c3dc 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_control_main).
 -include("rabbit.hrl").
+-include("rabbit_cli.hrl").
 
 -export([start/0, stop/0, parse_arguments/2, action/5,
-         sync_queue/1, cancel_sync_queue/1]).
+         sync_queue/1, cancel_sync_queue/1, become/1,
+         purge_queue/1]).
 
--define(RPC_TIMEOUT, infinity).
--define(EXTERNAL_CHECK_INTERVAL, 1000).
-
--define(QUIET_OPT, "-q").
--define(NODE_OPT, "-n").
--define(VHOST_OPT, "-p").
--define(PRIORITY_OPT, "--priority").
--define(APPLY_TO_OPT, "--apply-to").
--define(RAM_OPT, "--ram").
--define(OFFLINE_OPT, "--offline").
+-import(rabbit_cli, [rpc_call/4, rpc_call/5]).
 
--define(QUIET_DEF, {?QUIET_OPT, flag}).
--define(NODE_DEF(Node), {?NODE_OPT, {option, Node}}).
--define(VHOST_DEF, {?VHOST_OPT, {option, "/"}}).
--define(PRIORITY_DEF, {?PRIORITY_OPT, {option, "0"}}).
--define(APPLY_TO_DEF, {?APPLY_TO_OPT, {option, "all"}}).
--define(RAM_DEF, {?RAM_OPT, flag}).
--define(OFFLINE_DEF, {?OFFLINE_OPT, flag}).
+-define(EXTERNAL_CHECK_INTERVAL, 1000).
 
--define(GLOBAL_DEFS(Node), [?QUIET_DEF, ?NODE_DEF(Node)]).
+-define(GLOBAL_DEFS(Node), [?QUIET_DEF, ?NODE_DEF(Node), ?TIMEOUT_DEF]).
 
 -define(COMMANDS,
         [stop,
          change_cluster_node_type,
          update_cluster_nodes,
          {forget_cluster_node, [?OFFLINE_DEF]},
+         rename_cluster_node,
+         force_boot,
          cluster_status,
          {sync_queue, [?VHOST_DEF]},
          {cancel_sync_queue, [?VHOST_DEF]},
+         {purge_queue, [?VHOST_DEF]},
 
          add_user,
          delete_user,
          {"Policies",   rabbit_policy,             list_formatted, info_keys},
          {"Parameters", rabbit_runtime_parameters, list_formatted, info_keys}]).
 
+-define(COMMANDS_NOT_REQUIRING_APP,
+        [stop, stop_app, start_app, wait, reset, force_reset, rotate_logs,
+         join_cluster, change_cluster_node_type, update_cluster_nodes,
+         forget_cluster_node, rename_cluster_node, cluster_status, status,
+         environment, eval, force_boot]).
+
+-define(COMMANDS_WITH_TIMEOUT,
+        [list_user_permissions, list_policies, list_queues, list_exchanges,
+         list_bindings, list_connections, list_channels, list_consumers,
+         list_vhosts, list_parameters,
+         purge_queue]).
+
 %%----------------------------------------------------------------------------
 
 -ifdef(use_specs).
         (atom(), node(), [string()], [{string(), any()}],
          fun ((string(), [any()]) -> 'ok'))
         -> 'ok').
--spec(usage/0 :: () -> no_return()).
+
+-spec(action/6 ::
+        (atom(), node(), [string()], [{string(), any()}],
+         fun ((string(), [any()]) -> 'ok'), timeout())
+        -> 'ok').
 
 -endif.
 
 %%----------------------------------------------------------------------------
 
 start() ->
-    {ok, [[NodeStr|_]|_]} = init:get_argument(nodename),
-    {Command, Opts, Args} =
-        case parse_arguments(init:get_plain_arguments(), NodeStr) of
-            {ok, Res}  -> Res;
-            no_command -> print_error("could not recognise command", []),
-                          usage()
-        end,
-    Quiet = proplists:get_bool(?QUIET_OPT, Opts),
-    Node = proplists:get_value(?NODE_OPT, Opts),
-    Inform = case Quiet of
-                 true  -> fun (_Format, _Args1) -> ok end;
-                 false -> fun (Format, Args1) ->
-                                  io:format(Format ++ " ...~n", Args1)
-                          end
-             end,
-    PrintInvalidCommandError =
-        fun () ->
-                print_error("invalid command '~s'",
-                            [string:join([atom_to_list(Command) | Args], " ")])
-        end,
-
-    %% The reason we don't use a try/catch here is that rpc:call turns
-    %% thrown errors into normal return values
-    case catch action(Command, Node, Args, Opts, Inform) of
-        ok ->
-            case Quiet of
-                true  -> ok;
-                false -> io:format("...done.~n")
-            end,
-            rabbit_misc:quit(0);
-        {ok, Info} ->
-            case Quiet of
-                true  -> ok;
-                false -> io:format("...done (~p).~n", [Info])
-            end,
-            rabbit_misc:quit(0);
-        {'EXIT', {function_clause, [{?MODULE, action, _}    | _]}} -> %% < R15
-            PrintInvalidCommandError(),
-            usage();
-        {'EXIT', {function_clause, [{?MODULE, action, _, _} | _]}} -> %% >= R15
-            PrintInvalidCommandError(),
-            usage();
-        {'EXIT', {badarg, _}} ->
-            print_error("invalid parameter: ~p", [Args]),
-            usage();
-        {error, {Problem, Reason}} when is_atom(Problem), is_binary(Reason) ->
-            %% We handle this common case specially to avoid ~p since
-            %% that has i18n issues
-            print_error("~s: ~s", [Problem, Reason]),
-            rabbit_misc:quit(2);
-        {error, Reason} ->
-            print_error("~p", [Reason]),
-            rabbit_misc:quit(2);
-        {error_string, Reason} ->
-            print_error("~s", [Reason]),
-            rabbit_misc:quit(2);
-        {badrpc, {'EXIT', Reason}} ->
-            print_error("~p", [Reason]),
-            rabbit_misc:quit(2);
-        {badrpc, Reason} ->
-            print_error("unable to connect to node ~w: ~w", [Node, Reason]),
-            print_badrpc_diagnostics([Node]),
-            rabbit_misc:quit(2);
-        {badrpc_multi, Reason, Nodes} ->
-            print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]),
-            print_badrpc_diagnostics(Nodes),
-            rabbit_misc:quit(2);
-        Other ->
-            print_error("~p", [Other]),
-            rabbit_misc:quit(2)
-    end.
+    rabbit_cli:main(
+      fun (Args, NodeStr) ->
+              parse_arguments(Args, NodeStr)
+      end,
+      fun (Command, Node, Args, Opts) ->
+              Quiet = proplists:get_bool(?QUIET_OPT, Opts),
+              Inform = case Quiet of
+                           true  -> fun (_Format, _Args1) -> ok end;
+                           false -> fun (Format, Args1) ->
+                                            io:format(Format ++ " ...~n", Args1)
+                                    end
+                       end,
+              try
+                  T = case get_timeout(Opts) of
+                          {ok, Timeout} ->
+                              Timeout;
+                          {error, _} ->
+                              %% since this is an error with user input, ignore the quiet
+                              %% setting
+                              io:format("Failed to parse provided timeout value, using ~s~n", [?RPC_TIMEOUT]),
+                              ?RPC_TIMEOUT
+                  end,
+                  do_action(Command, Node, Args, Opts, Inform, T)
+              catch _:E -> E
+              end
+      end, rabbit_ctl_usage).
 
-fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args).
+parse_arguments(CmdLine, NodeStr) ->
+    rabbit_cli:parse_arguments(
+      ?COMMANDS, ?GLOBAL_DEFS(NodeStr), ?NODE_OPT, CmdLine).
 
 print_report(Node, {Descr, Module, InfoFun, KeysFun}) ->
     io:format("~s:~n", [Descr]),
@@ -222,33 +185,66 @@ print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg) ->
     end,
     io:nl().
 
-print_error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args).
+get_timeout(Opts) ->
+    parse_timeout(proplists:get_value(?TIMEOUT_OPT, Opts, ?RPC_TIMEOUT)).
 
-print_badrpc_diagnostics(Nodes) ->
-    fmt_stderr(rabbit_nodes:diagnostics(Nodes), []).
+parse_number(N) when is_list(N) ->
+    try list_to_integer(N) of
+        Val -> Val
+    catch error:badarg ->
+            %% could have been a float, give it
+            %% another shot
+            list_to_float(N)
+    end.
+
+parse_timeout("infinity") ->
+    {ok, infinity};
+parse_timeout(infinity) ->
+    {ok, infinity};
+parse_timeout(N) when is_list(N) ->
+    try parse_number(N) of
+        M ->
+            Y = case M >= 0 of
+                    true  -> round(M) * 1000;
+                    false -> ?RPC_TIMEOUT
+                end,
+            {ok, Y}
+    catch error:badarg ->
+        {error, infinity}
+    end;
+parse_timeout(N) ->
+    {ok, N}.
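+
+%% Worked examples of the parsing above (values are interpreted as seconds and
+%% converted to rounded milliseconds; "infinity" is passed through and a
+%% negative value falls back to ?RPC_TIMEOUT):
+%%
+%%   parse_timeout("infinity") -> {ok, infinity}
+%%   parse_timeout("30")       -> {ok, 30000}
+%%   parse_timeout("1.5")      -> {ok, 2000}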
+
+announce_timeout(infinity, _Inform) ->
+    %% no-op
+    ok;
+announce_timeout(Timeout, Inform) when is_number(Timeout) ->
+    Inform("Timeout: ~w seconds", [Timeout/1000]),
+    ok.
 
 stop() ->
     ok.
 
-usage() ->
-    io:format("~s", [rabbit_ctl_usage:usage()]),
-    rabbit_misc:quit(1).
+%%----------------------------------------------------------------------------
 
-parse_arguments(CmdLine, NodeStr) ->
-    case rabbit_misc:parse_arguments(
-           ?COMMANDS, ?GLOBAL_DEFS(NodeStr), CmdLine) of
-        {ok, {Cmd, Opts0, Args}} ->
-            Opts = [case K of
-                        ?NODE_OPT -> {?NODE_OPT, rabbit_nodes:make(V)};
-                        _         -> {K, V}
-                    end || {K, V} <- Opts0],
-            {ok, {Cmd, Opts, Args}};
-        E ->
-            E
+do_action(Command, Node, Args, Opts, Inform, Timeout) ->
+    case lists:member(Command, ?COMMANDS_NOT_REQUIRING_APP) of
+        false ->
+            case ensure_app_running(Node) of
+                ok ->
+                    case lists:member(Command, ?COMMANDS_WITH_TIMEOUT) of
+                        true  ->
+                            announce_timeout(Timeout, Inform),
+                            action(Command, Node, Args, Opts, Inform, Timeout);
+                        false ->
+                            action(Command, Node, Args, Opts, Inform)
+                    end;
+                E  -> E
+            end;
+        true  ->
+            action(Command, Node, Args, Opts, Inform)
     end.
 
-%%----------------------------------------------------------------------------
-
 action(stop, Node, Args, _Opts, Inform) ->
     Inform("Stopping and halting node ~p", [Node]),
     Res = call(Node, {rabbit, stop_and_halt, []}),
@@ -302,8 +298,26 @@ action(forget_cluster_node, Node, [ClusterNodeS], Opts, Inform) ->
     ClusterNode = list_to_atom(ClusterNodeS),
     RemoveWhenOffline = proplists:get_bool(?OFFLINE_OPT, Opts),
     Inform("Removing node ~p from cluster", [ClusterNode]),
-    rpc_call(Node, rabbit_mnesia, forget_cluster_node,
-             [ClusterNode, RemoveWhenOffline]);
+    case RemoveWhenOffline of
+        true  -> become(Node),
+                 rabbit_mnesia:forget_cluster_node(ClusterNode, true);
+        false -> rpc_call(Node, rabbit_mnesia, forget_cluster_node,
+                          [ClusterNode, false])
+    end;
+
+action(rename_cluster_node, Node, NodesS, _Opts, Inform) ->
+    Nodes = split_list([list_to_atom(N) || N <- NodesS]),
+    Inform("Renaming cluster nodes:~n~s~n",
+           [lists:flatten([rabbit_misc:format("  ~s -> ~s~n", [F, T]) ||
+                              {F, T} <- Nodes])]),
+    rabbit_mnesia_rename:rename(Node, Nodes);
+
+action(force_boot, Node, [], _Opts, Inform) ->
+    Inform("Forcing boot for Mnesia dir ~s", [mnesia:system_info(directory)]),
+    case rabbit:is_running(Node) of
+        false -> rabbit_mnesia:force_load_next_boot();
+        true  -> {error, rabbit_running}
+    end;
 
 action(sync_queue, Node, [Q], Opts, Inform) ->
     VHost = proplists:get_value(?VHOST_OPT, Opts),
@@ -370,12 +384,6 @@ action(set_user_tags, Node, [Username | TagsStr], _Opts, Inform) ->
     rpc_call(Node, rabbit_auth_backend_internal, set_tags,
              [list_to_binary(Username), Tags]);
 
-action(list_users, Node, [], _Opts, Inform) ->
-    Inform("Listing users", []),
-    display_info_list(
-      call(Node, {rabbit_auth_backend_internal, list_users, []}),
-      rabbit_auth_backend_internal:user_info_keys());
-
 action(add_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
     Inform("Creating vhost \"~s\"", Args),
     call(Node, {rabbit_vhost, add, Args});
@@ -384,63 +392,6 @@ action(delete_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
     Inform("Deleting vhost \"~s\"", Args),
     call(Node, {rabbit_vhost, delete, Args});
 
-action(list_vhosts, Node, Args, _Opts, Inform) ->
-    Inform("Listing vhosts", []),
-    ArgAtoms = default_if_empty(Args, [name]),
-    display_info_list(call(Node, {rabbit_vhost, info_all, []}), ArgAtoms);
-
-action(list_user_permissions, Node, Args = [_Username], _Opts, Inform) ->
-    Inform("Listing permissions for user ~p", Args),
-    display_info_list(call(Node, {rabbit_auth_backend_internal,
-                                  list_user_permissions, Args}),
-                      rabbit_auth_backend_internal:user_perms_info_keys());
-
-action(list_queues, Node, Args, Opts, Inform) ->
-    Inform("Listing queues", []),
-    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
-    ArgAtoms = default_if_empty(Args, [name, messages]),
-    display_info_list(rpc_call(Node, rabbit_amqqueue, info_all,
-                               [VHostArg, ArgAtoms]),
-                      ArgAtoms);
-
-action(list_exchanges, Node, Args, Opts, Inform) ->
-    Inform("Listing exchanges", []),
-    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
-    ArgAtoms = default_if_empty(Args, [name, type]),
-    display_info_list(rpc_call(Node, rabbit_exchange, info_all,
-                               [VHostArg, ArgAtoms]),
-                      ArgAtoms);
-
-action(list_bindings, Node, Args, Opts, Inform) ->
-    Inform("Listing bindings", []),
-    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
-    ArgAtoms = default_if_empty(Args, [source_name, source_kind,
-                                       destination_name, destination_kind,
-                                       routing_key, arguments]),
-    display_info_list(rpc_call(Node, rabbit_binding, info_all,
-                               [VHostArg, ArgAtoms]),
-                      ArgAtoms);
-
-action(list_connections, Node, Args, _Opts, Inform) ->
-    Inform("Listing connections", []),
-    ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
-    display_info_list(rpc_call(Node, rabbit_networking, connection_info_all,
-                               [ArgAtoms]),
-                      ArgAtoms);
-
-action(list_channels, Node, Args, _Opts, Inform) ->
-    Inform("Listing channels", []),
-    ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
-                                       messages_unacknowledged]),
-    display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms]),
-                      ArgAtoms);
-
-action(list_consumers, Node, _Args, Opts, Inform) ->
-    Inform("Listing consumers", []),
-    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
-    display_info_list(rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]),
-                      rabbit_amqqueue:consumer_info_keys());
-
 action(trace_on, Node, [], Opts, Inform) ->
     VHost = proplists:get_value(?VHOST_OPT, Opts),
     Inform("Starting tracing for vhost \"~s\"", [VHost]),
@@ -473,13 +424,6 @@ action(clear_permissions, Node, [Username], Opts, Inform) ->
     call(Node, {rabbit_auth_backend_internal, clear_permissions,
                 [Username, VHost]});
 
-action(list_permissions, Node, [], Opts, Inform) ->
-    VHost = proplists:get_value(?VHOST_OPT, Opts),
-    Inform("Listing permissions in vhost \"~s\"", [VHost]),
-    display_info_list(call(Node, {rabbit_auth_backend_internal,
-                             list_vhost_permissions, [VHost]}),
-                      rabbit_auth_backend_internal:vhost_perms_info_keys());
-
 action(set_parameter, Node, [Component, Key, Value], Opts, Inform) ->
     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
     Inform("Setting runtime parameter ~p for component ~p to ~p",
@@ -495,13 +439,6 @@ action(clear_parameter, Node, [Component, Key], Opts, Inform) ->
                                                       list_to_binary(Component),
                                                       list_to_binary(Key)]);
 
-action(list_parameters, Node, [], Opts, Inform) ->
-    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
-    Inform("Listing runtime parameters", []),
-    display_info_list(
-      rpc_call(Node, rabbit_runtime_parameters, list_formatted, [VHostArg]),
-      rabbit_runtime_parameters:info_keys());
-
 action(set_policy, Node, [Key, Pattern, Defn], Opts, Inform) ->
     Msg = "Setting policy ~p for pattern ~p to ~p with priority ~p",
     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
@@ -517,12 +454,6 @@ action(clear_policy, Node, [Key], Opts, Inform) ->
     Inform("Clearing policy ~p", [Key]),
     rpc_call(Node, rabbit_policy, delete, [VHostArg, list_to_binary(Key)]);
 
-action(list_policies, Node, [], Opts, Inform) ->
-    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
-    Inform("Listing policies", []),
-    display_info_list(rpc_call(Node, rabbit_policy, list_formatted, [VHostArg]),
-                      rabbit_policy:info_keys());
-
 action(report, Node, _Args, _Opts, Inform) ->
     Inform("Reporting server status on ~p~n~n", [erlang:universaltime()]),
     [begin ok = action(Action, N, [], [], Inform), io:nl() end ||
@@ -550,7 +481,113 @@ action(eval, Node, [Expr], _Opts, _Inform) ->
             end;
         {error, E, _} ->
             {error_string, format_parse_error(E)}
-    end.
+    end;
+
+action(Command, Node, Args, Opts, Inform) ->
+    %% For backward compatibility, run commands accepting a timeout with
+    %% the default timeout.
+    action(Command, Node, Args, Opts, Inform, ?RPC_TIMEOUT).
+
+action(purge_queue, _Node, [], _Opts, _Inform, _Timeout) ->
+    {error, "purge_queue takes queue name as an argument"};
+
+action(purge_queue, Node, [Q], Opts, Inform, Timeout) ->
+    VHost = proplists:get_value(?VHOST_OPT, Opts),
+    QRes = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
+    Inform("Purging ~s", [rabbit_misc:rs(QRes)]),
+    rpc_call(Node, rabbit_control_main, purge_queue, [QRes], Timeout);
+
+action(list_users, Node, [], _Opts, Inform, Timeout) ->
+    Inform("Listing users", []),
+    display_info_list(
+      call(Node, {rabbit_auth_backend_internal, list_users, []}, Timeout),
+      rabbit_auth_backend_internal:user_info_keys());
+
+action(list_permissions, Node, [], Opts, Inform, Timeout) ->
+    VHost = proplists:get_value(?VHOST_OPT, Opts),
+    Inform("Listing permissions in vhost \"~s\"", [VHost]),
+    display_info_list(call(Node, {rabbit_auth_backend_internal,
+                             list_vhost_permissions, [VHost]}, Timeout),
+                      rabbit_auth_backend_internal:vhost_perms_info_keys());
+
+action(list_parameters, Node, [], Opts, Inform, Timeout) ->
+    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+    Inform("Listing runtime parameters", []),
+    display_info_list(
+      rpc_call(Node, rabbit_runtime_parameters, list_formatted, [VHostArg],
+               Timeout),
+      rabbit_runtime_parameters:info_keys());
+
+action(list_policies, Node, [], Opts, Inform, Timeout) ->
+    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+    Inform("Listing policies", []),
+    display_info_list(rpc_call(Node, rabbit_policy, list_formatted, [VHostArg],
+                              Timeout),
+                      rabbit_policy:info_keys());
+
+action(list_vhosts, Node, Args, _Opts, Inform, Timeout) ->
+    Inform("Listing vhosts", []),
+    ArgAtoms = default_if_empty(Args, [name]),
+    display_info_list(call(Node, {rabbit_vhost, info_all, []}, Timeout),
+                      ArgAtoms);
+
+action(list_user_permissions, _Node, _Args = [], _Opts, _Inform, _Timeout) ->
+    {error_string,
+     "list_user_permissions expects a username argument, but none provided."};
+action(list_user_permissions, Node, Args = [_Username], _Opts, Inform, Timeout) ->
+    Inform("Listing permissions for user ~p", Args),
+    display_info_list(call(Node, {rabbit_auth_backend_internal,
+                                  list_user_permissions, Args}, Timeout),
+                      rabbit_auth_backend_internal:user_perms_info_keys());
+
+action(list_queues, Node, Args, Opts, Inform, Timeout) ->
+    Inform("Listing queues", []),
+    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+    ArgAtoms = default_if_empty(Args, [name, messages]),
+    display_info_list(rpc_call(Node, rabbit_amqqueue, info_all,
+                               [VHostArg, ArgAtoms], Timeout),
+                      ArgAtoms);
+
+action(list_exchanges, Node, Args, Opts, Inform, Timeout) ->
+    Inform("Listing exchanges", []),
+    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+    ArgAtoms = default_if_empty(Args, [name, type]),
+    display_info_list(rpc_call(Node, rabbit_exchange, info_all,
+                               [VHostArg, ArgAtoms], Timeout),
+                      ArgAtoms);
+
+action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
+    Inform("Listing bindings", []),
+    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+    ArgAtoms = default_if_empty(Args, [source_name, source_kind,
+                                       destination_name, destination_kind,
+                                       routing_key, arguments]),
+    display_info_list(rpc_call(Node, rabbit_binding, info_all,
+                               [VHostArg, ArgAtoms], Timeout),
+                      ArgAtoms);
+
+action(list_connections, Node, Args, _Opts, Inform, Timeout) ->
+    Inform("Listing connections", []),
+    ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
+    display_info_list(rpc_call(Node, rabbit_networking, connection_info_all,
+                               [ArgAtoms], Timeout),
+                      ArgAtoms);
+
+action(list_channels, Node, Args, _Opts, Inform, Timeout) ->
+    Inform("Listing channels", []),
+    ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
+                                       messages_unacknowledged]),
+    display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms],
+                               Timeout),
+                      ArgAtoms);
+
+action(list_consumers, Node, _Args, Opts, Inform, Timeout) ->
+    Inform("Listing consumers", []),
+    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+    display_info_list(rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg],
+                               Timeout),
+                      rabbit_amqqueue:consumer_info_keys()).
+
 
 format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)).
 
@@ -564,6 +601,13 @@ cancel_sync_queue(Q) ->
                  rabbit_amqqueue:cancel_sync_mirrors(QPid)
          end).
 
+purge_queue(Q) ->
+    rabbit_amqqueue:with(
+      Q, fun(Q1) ->
+                 rabbit_amqqueue:purge(Q1),
+                 ok
+         end).
+
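+%% Example invocation (hypothetical vhost and queue names):
+%%
+%%   rabbitmqctl purge_queue -p /my-vhost my-queue
+%%
+%% The CLI builds the queue resource with rabbit_misc:r/3 and then invokes
+%% purge_queue/1 above on the broker node via rpc_call/5, honouring the
+%% user-supplied (or default) timeout.
+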
 %%----------------------------------------------------------------------------
 
 wait_for_application(Node, PidFile, Application, Inform) ->
@@ -582,7 +626,7 @@ wait_for_startup(Node, Pid) ->
       Node, Pid, fun() -> rpc:call(Node, rabbit, await_startup, []) =:= ok end).
 
 while_process_is_alive(Node, Pid, Activity) ->
-    case process_up(Pid) of
+    case rabbit_misc:is_os_process_alive(Pid) of
         true  -> case Activity() of
                      true  -> ok;
                      false -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
@@ -592,7 +636,7 @@ while_process_is_alive(Node, Pid, Activity) ->
     end.
 
 wait_for_process_death(Pid) ->
-    case process_up(Pid) of
+    case rabbit_misc:is_os_process_alive(Pid) of
         true  -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
                  wait_for_process_death(Pid);
         false -> ok
@@ -616,38 +660,16 @@ read_pid_file(PidFile, Wait) ->
             exit({error, {could_not_read_pid, E}})
     end.
 
-% Test using some OS clunkiness since we shouldn't trust
-% rpc:call(os, getpid, []) at this point
-process_up(Pid) ->
-    with_os([{unix, fun () ->
-                            run_ps(Pid) =:= 0
-                    end},
-             {win32, fun () ->
-                             Cmd = "tasklist /nh /fi \"pid eq " ++ Pid ++ "\" ",
-                             Res = rabbit_misc:os_cmd(Cmd ++ "2>&1"),
-                             case re:run(Res, "erl\\.exe", [{capture, none}]) of
-                                 match -> true;
-                                 _     -> false
-                             end
-                     end}]).
-
-with_os(Handlers) ->
-    {OsFamily, _} = os:type(),
-    case proplists:get_value(OsFamily, Handlers) of
-        undefined -> throw({unsupported_os, OsFamily});
-        Handler   -> Handler()
-    end.
-
-run_ps(Pid) ->
-    Port = erlang:open_port({spawn, "ps -p " ++ Pid},
-                            [exit_status, {line, 16384},
-                             use_stdio, stderr_to_stdout]),
-    exit_loop(Port).
-
-exit_loop(Port) ->
-    receive
-        {Port, {exit_status, Rc}} -> Rc;
-        {Port, _}                 -> exit_loop(Port)
+become(BecomeNode) ->
+    error_logger:tty(false),
+    ok = net_kernel:stop(),
+    case net_adm:ping(BecomeNode) of
+        pong -> exit({node_running, BecomeNode});
+        pang -> io:format("  * Impersonating node: ~s...", [BecomeNode]),
+                {ok, _} = rabbit_cli:start_distribution(BecomeNode),
+                io:format(" done~n", []),
+                Dir = mnesia:system_info(directory),
+                io:format("  * Mnesia directory  : ~s~n", [Dir])
     end.
 
 %%----------------------------------------------------------------------------
@@ -715,9 +737,23 @@ unsafe_rpc(Node, Mod, Fun, Args) ->
         Normal            -> Normal
     end.
 
+ensure_app_running(Node) ->
+    case call(Node, {rabbit, is_running, []}) of
+        true  -> ok;
+        false -> {error_string,
+                  rabbit_misc:format(
+                    "rabbit application is not running on node ~s.~n"
+                    " * Suggestion: start it with \"rabbitmqctl start_app\" "
+                    "and try again", [Node])};
+        Other -> Other
+    end.
+
 call(Node, {Mod, Fun, Args}) ->
     rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args)).
 
+call(Node, {Mod, Fun, Args}, Timeout) ->
+    rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args), Timeout).
+
 list_to_binary_utf8(L) ->
     B = list_to_binary(L),
     case rabbit_binary_parser:validate_utf8(B) of
@@ -725,9 +761,6 @@ list_to_binary_utf8(L) ->
         error -> throw({error, {not_utf_8, L}})
     end.
 
-rpc_call(Node, Mod, Fun, Args) ->
-    rpc:call(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
-
 %% escape does C-style backslash escaping of non-printable ASCII
 %% characters.  We don't escape characters above 127, since they may
 %% form part of UTF-8 strings.
@@ -754,3 +787,7 @@ prettify_typed_amqp_value(table,   Value) -> prettify_amqp_table(Value);
 prettify_typed_amqp_value(array,   Value) -> [prettify_typed_amqp_value(T, V) ||
                                                  {T, V} <- Value];
 prettify_typed_amqp_value(_Type,   Value) -> Value.
+
+split_list([])         -> [];
+split_list([_])        -> exit(even_list_needed);
+split_list([A, B | T]) -> [{A, B} | split_list(T)].
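+
+%% split_list/1 is used by the rename_cluster_node action to pair old and new
+%% node names, e.g. (hypothetical atoms):
+%%   split_list([a1, a2, b1, b2]) -> [{a1, a2}, {b1, b2}]
+%% An odd-length list exits with even_list_needed.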
index ec32e6878dbf4860ae716b926af90935bf3b45c2..29032df856c3644a1910b184d4b1fcc533b3224f 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_dead_letter).
@@ -66,8 +66,7 @@ make_msg(Msg = #basic_message{content       = Content,
                         {<<"time">>,         timestamp, TimeSec},
                         {<<"exchange">>,     longstr,   Exchange#resource.name},
                         {<<"routing-keys">>, array,     RKs1}] ++ PerMsgTTL,
-                HeadersFun1(rabbit_basic:prepend_table_header(<<"x-death">>,
-                                                              Info, Headers))
+                HeadersFun1(update_x_death_header(Info, Headers))
         end,
     Content1 = #content{properties = Props} =
         rabbit_basic:map_headers(HeadersFun2, Content),
@@ -78,6 +77,102 @@ make_msg(Msg = #basic_message{content       = Content,
                       routing_keys  = DeathRoutingKeys,
                       content       = Content2}.
 
+
+x_death_event_key(Info, Key) ->
+    case lists:keysearch(Key, 1, Info) of
+        false                         -> undefined;
+        {value, {Key, _KeyType, Val}} -> Val
+    end.
+
+maybe_append_to_event_group(Table, _Key, _SeenKeys, []) ->
+    [Table];
+maybe_append_to_event_group(Table, {_Queue, _Reason} = Key, SeenKeys, Acc) ->
+    case sets:is_element(Key, SeenKeys) of
+        true  -> Acc;
+        false -> [Table | Acc]
+    end.
+
+group_by_queue_and_reason([]) ->
+    [];
+group_by_queue_and_reason([Table]) ->
+    [Table];
+group_by_queue_and_reason(Tables) ->
+    {_, Grouped} =
+        lists:foldl(
+          fun ({table, Info}, {SeenKeys, Acc}) ->
+                  Q = x_death_event_key(Info, <<"queue">>),
+                  R = x_death_event_key(Info, <<"reason">>),
+                  Matcher = queue_and_reason_matcher(Q, R),
+                  {Matches, _} = lists:partition(Matcher, Tables),
+                  {Augmented, N} = case Matches of
+                                       [X]        -> {X, 1};
+                                       [X|_] = Xs -> {X, length(Xs)}
+                                   end,
+                  Key = {Q, R},
+                  Acc1 = maybe_append_to_event_group(
+                           ensure_xdeath_event_count(Augmented, N),
+                           Key, SeenKeys, Acc),
+                  {sets:add_element(Key, SeenKeys), Acc1}
+          end, {sets:new(), []}, Tables),
+    Grouped.
+
+update_x_death_header(Info, Headers) ->
+    Q = x_death_event_key(Info, <<"queue">>),
+    R = x_death_event_key(Info, <<"reason">>),
+    case rabbit_basic:header(<<"x-death">>, Headers) of
+        undefined ->
+            rabbit_basic:prepend_table_header(
+              <<"x-death">>,
+              [{<<"count">>, long, 1} | Info], Headers);
+        {<<"x-death">>, array, Tables} ->
+            %% group existing x-death headers in case we have some from
+            %% before rabbitmq-server#78
+            GroupedTables = group_by_queue_and_reason(Tables),
+            {Matches, Others} = lists:partition(
+                                  queue_and_reason_matcher(Q, R),
+                                  GroupedTables),
+            Info1 = case Matches of
+                        [] ->
+                            [{<<"count">>, long, 1} | Info];
+                        [{table, M}] ->
+                            increment_xdeath_event_count(M)
+                    end,
+            rabbit_misc:set_table_value(
+              Headers, <<"x-death">>, array,
+              [{table, rabbit_misc:sort_field_table(Info1)} | Others])
+    end.
+
+ensure_xdeath_event_count({table, Info}, InitialVal) when InitialVal >= 1 ->
+    {table, ensure_xdeath_event_count(Info, InitialVal)};
+ensure_xdeath_event_count(Info, InitialVal) when InitialVal >= 1 ->
+    case x_death_event_key(Info, <<"count">>) of
+        undefined ->
+            [{<<"count">>, long, InitialVal} | Info];
+        _ ->
+            Info
+    end.
+
+increment_xdeath_event_count(Info) ->
+    case x_death_event_key(Info, <<"count">>) of
+        undefined ->
+            [{<<"count">>, long, 1} | Info];
+        N ->
+            lists:keyreplace(
+              <<"count">>, 1, Info,
+              {<<"count">>, long, N + 1})
+    end.
+
+queue_and_reason_matcher(Q, R) ->
+    F = fun(Info) ->
+                x_death_event_key(Info, <<"queue">>) =:= Q
+                    andalso x_death_event_key(Info, <<"reason">>) =:= R
+        end,
+    fun({table, Info}) ->
+            F(Info);
+       (Info) when is_list(Info) ->
+            F(Info)
+    end.
+
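+%% Illustration (hypothetical queue/reason values): a message dead-lettered
+%% twice from queue <<"q1">> with reason <<"expired">> now carries a single
+%% x-death table for that {queue, reason} pair, containing
+%%   {<<"count">>, long, 2}
+%% among its fields, instead of one table per event; entries written before
+%% rabbitmq-server#78 (which had no count) are first collapsed by
+%% group_by_queue_and_reason/1 and given a count by ensure_xdeath_event_count/2.
+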
 per_msg_ttl_header(#'P_basic'{expiration = undefined}) ->
     [];
 per_msg_ttl_header(#'P_basic'{expiration = Expiration}) ->
@@ -129,6 +224,9 @@ is_cycle(Queue, Deaths) ->
                            {longstr, <<"rejected">>} =/=
                                rabbit_misc:table_lookup(D, <<"reason">>);
                        (_) ->
+                           %% There was something we didn't expect, so a
+                           %% client must have put it there; the cycle was
+                           %% therefore not "fully automatic".
                            false
                    end, Cycle ++ [H])
     end.
@@ -139,7 +237,7 @@ log_cycle_once(Queues) ->
         true      -> ok;
         undefined -> rabbit_log:warning(
                        "Message dropped. Dead-letter queues cycle detected" ++
-                       ": ~p~nThis cycle will NOT be reported again.~n",
+                           ": ~p~nThis cycle will NOT be reported again.~n",
                        [Queues]),
                      put(Key, true)
     end.
index 4eafada3f3901779bb3fb330b39bbabb4857d2a3..531f3f922ebc79a66a5a02704807980739fe8c12 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_diagnostics).
 
 -define(PROCESS_INFO,
-        [current_stacktrace, initial_call, dictionary, message_queue_len,
-         links, monitors, monitored_by, heap_size]).
+        [registered_name, current_stacktrace, initial_call, dictionary,
+         message_queue_len, links, monitors, monitored_by, heap_size]).
 
--export([maybe_stuck/0, maybe_stuck/1]).
+-export([maybe_stuck/0, maybe_stuck/1, top_memory_use/0, top_memory_use/1,
+         top_binary_refs/0, top_binary_refs/1]).
 
 maybe_stuck() -> maybe_stuck(5000).
 
@@ -41,13 +42,13 @@ maybe_stuck(Pids, Timeout) ->
     maybe_stuck(Pids2, Timeout - 500).
 
 looks_stuck(Pid) ->
-    case process_info(Pid, status) of
+    case info(Pid, status, gone) of
         {status, waiting} ->
             %% It's tempting to just check for message_queue_len > 0
             %% here rather than mess around with stack traces and
             %% heuristics. But really, sometimes freshly stuck
             %% processes can have 0 messages...
-            case erlang:process_info(Pid, current_stacktrace) of
+            case info(Pid, current_stacktrace, gone) of
                 {current_stacktrace, [H|_]} ->
                     maybe_stuck_stacktrace(H);
                 _ ->
@@ -75,5 +76,38 @@ maybe_stuck_stacktrace({_M, F, _A}) ->
         _ -> false
     end.
 
+top_memory_use() -> top_memory_use(30).
+
+top_memory_use(Count) ->
+    Pids = processes(),
+    io:format("Memory use: top ~p of ~p processes.~n", [Count, length(Pids)]),
+    Procs = [{info(Pid, memory, 0), info(Pid)} || Pid <- Pids],
+    Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
+    io:format("~p~n", [Sorted]).
+
+top_binary_refs() -> top_binary_refs(30).
+
+top_binary_refs(Count) ->
+    Pids = processes(),
+    io:format("Binary refs: top ~p of ~p processes.~n", [Count, length(Pids)]),
+    Procs = [{{binary_refs, binary_refs(Pid)}, info(Pid)} || Pid <- Pids],
+    Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
+    io:format("~p~n", [Sorted]).
+
+binary_refs(Pid) ->
+    {binary, Refs} = info(Pid, binary, []),
+    lists:sum([Sz || {_Ptr, Sz} <- lists:usort([{Ptr, Sz} ||
+                                                   {Ptr, Sz, _Cnt} <- Refs])]).
+
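+%% Example use from a shell attached to the broker node (the argument is the
+%% number of processes to report; 30 is the default):
+%%
+%%   rabbit_diagnostics:top_memory_use(10).
+%%   rabbit_diagnostics:top_binary_refs(10).
+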
 info(Pid) ->
-    [{pid, Pid} | process_info(Pid, ?PROCESS_INFO)].
+    [{pid, Pid} | info(Pid, ?PROCESS_INFO, [])].
+
+info(Pid, Infos, Default) ->
+    try
+        process_info(Pid, Infos)
+    catch
+        _:_ -> case is_atom(Infos) of
+                   true  -> {Infos, Default};
+                   false -> Default
+               end
+    end.
index 749a67b1d507ab88c44ddb802465deab52f87df8..d79ef4aeb9a3f4ec50f3a3f5e0aee5fe15fca010 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_direct).
@@ -83,16 +83,27 @@ connect({Username, Password}, VHost, Protocol, Pid, Infos) ->
 connect0(AuthFun, VHost, Protocol, Pid, Infos) ->
     case rabbit:is_running() of
         true  -> case AuthFun() of
-                     {ok, User} ->
+                     {ok, User = #user{username = Username}} ->
+                         notify_auth_result(Username,
+                           user_authentication_success, []),
                          connect1(User, VHost, Protocol, Pid, Infos);
-                     {refused, _M, _A} ->
+                     {refused, Username, Msg, Args} ->
+                         notify_auth_result(Username,
+                           user_authentication_failure,
+                           [{error, rabbit_misc:format(Msg, Args)}]),
                          {error, {auth_failure, "Refused"}}
                  end;
         false -> {error, broker_not_found_on_node}
     end.
 
+notify_auth_result(Username, AuthResult, ExtraProps) ->
+    EventProps = [{connection_type, direct},
+                  {name, case Username of none -> ''; _ -> Username end}] ++
+                 ExtraProps,
+    rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+
 connect1(User, VHost, Protocol, Pid, Infos) ->
-    try rabbit_access_control:check_vhost_access(User, VHost) of
+    try rabbit_access_control:check_vhost_access(User, VHost, undefined) of
         ok -> ok = pg_local:join(rabbit_direct, Pid),
               rabbit_event:notify(connection_created, Infos),
               {ok, {User, rabbit_reader:server_properties(Protocol)}}
index 031a04f0a10ac2da1f8328dda70ccd25858c42bf..518000eb645ba21b6da7fbe0f1153bf4afadb7a6 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_disk_monitor).
diff --git a/rabbitmq-server/src/rabbit_epmd_monitor.erl b/rabbitmq-server/src/rabbit_epmd_monitor.erl
new file mode 100644 (file)
index 0000000..5b06237
--- /dev/null
@@ -0,0 +1,101 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_epmd_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3]).
+
+-record(state, {timer, mod, me, host, port}).
+
+-define(SERVER, ?MODULE).
+-define(CHECK_FREQUENCY, 60000).
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+%% It's possible for epmd to be killed out from underneath us. If that
+%% happens, then obviously clustering and rabbitmqctl stop
+%% working. This process checks up on epmd and restarts it /
+%% re-registers us with it if it has gone away.
+%%
+%% How could epmd be killed?
+%%
+%% 1) The most popular way for this to happen is when running as a
+%%    Windows service. The user starts rabbitmqctl first, and this starts
+%%    epmd under the user's account. When they log out epmd is killed.
+%%
+%% 2) Some packagings of (non-RabbitMQ?) Erlang apps might do "killall
+%%    epmd" as a shutdown or uninstall step.
+%% ----------------------------------------------------------------------------
+
+start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+init([]) ->
+    {Me, Host} = rabbit_nodes:parts(node()),
+    Mod = net_kernel:epmd_module(),
+    {port, Port, _Version} = Mod:port_please(Me, Host),
+    {ok, ensure_timer(#state{mod  = Mod,
+                             me   = Me,
+                             host = Host,
+                             port = Port})}.
+
+handle_call(_Request, _From, State) ->
+    {noreply, State}.
+
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+handle_info(check, State) ->
+    check_epmd(State),
+    {noreply, ensure_timer(State#state{timer = undefined})};
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+ensure_timer(State) ->
+    rabbit_misc:ensure_timer(State, #state.timer, ?CHECK_FREQUENCY, check).
+
+check_epmd(#state{mod  = Mod,
+                  me   = Me,
+                  host = Host,
+                  port = Port}) ->
+    case Mod:port_please(Me, Host) of
+        noport -> rabbit_log:warning(
+                    "epmd does not know us, re-registering ~s at port ~b~n",
+                    [Me, Port]),
+                  rabbit_nodes:ensure_epmd(),
+                  erl_epmd:register_node(Me, Port);
+        _      -> ok
+    end.
index 353da0a7e260b48cadf425d7fced3835c39cc419..eecb2d64d981a8b90df5f763aef01d4f4f54392b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_error_logger).
 
 start() ->
     {ok, DefaultVHost} = application:get_env(default_vhost),
-    ok = error_logger:add_report_handler(?MODULE, [DefaultVHost]).
+    case error_logger:add_report_handler(?MODULE, [DefaultVHost]) of
+        ok ->
+            ok;
+        {error, {no_such_vhost, DefaultVHost}} ->
+            rabbit_log:warning("Default virtual host '~s' not found; "
+                               "exchange '~s' disabled~n",
+                               [DefaultVHost, ?LOG_EXCH_NAME]),
+            ok
+    end.
 
 stop() ->
-    terminated_ok = error_logger:delete_report_handler(rabbit_error_logger),
-    ok.
+    case error_logger:delete_report_handler(rabbit_error_logger) of
+        terminated_ok             -> ok;
+        {error, module_not_found} -> ok
+    end.
 
 %%----------------------------------------------------------------------------
 
index be84273904befaf5256833b184a4adfa96e0b64a..65ab7fcca84b50b58646b1c97b37d147e8fcfeb4 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_error_logger_file_h).
@@ -87,8 +87,10 @@ safe_handle_event(HandleEvent, Event, State) ->
         HandleEvent(Event, State)
     catch
         _:Error ->
-            io:format("Event crashed log handler:~n~P~n~P~n",
-                      [Event, 30, Error, 30]),
+            io:format(
+              "Error in log handler~n====================~n"
+              "Event: ~P~nError: ~P~nStack trace: ~p~n~n",
+              [Event, 30, Error, 30, erlang:get_stacktrace()]),
             {ok, State}
     end.
 
index b867223b50e453573f125a6c69e77229740725d9..13bf6bc6f862ea6ad223270988f7a106323d92bc 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_event).
@@ -23,6 +23,7 @@
          ensure_stats_timer/3, stop_stats_timer/2, reset_stats_timer/2]).
 -export([stats_level/2, if_enabled/3]).
 -export([notify/2, notify/3, notify_if/3]).
+-export([sync_notify/2, sync_notify/3]).
 
 %%----------------------------------------------------------------------------
 
@@ -61,6 +62,9 @@
 -spec(notify/2 :: (event_type(), event_props()) -> 'ok').
 -spec(notify/3 :: (event_type(), event_props(), reference() | 'none') -> 'ok').
 -spec(notify_if/3 :: (boolean(), event_type(), event_props()) -> 'ok').
+-spec(sync_notify/2 :: (event_type(), event_props()) -> 'ok').
+-spec(sync_notify/3 :: (event_type(), event_props(),
+                        reference() | 'none') -> 'ok').
 
 -endif.
 
@@ -145,7 +149,16 @@ notify_if(false, _Type, _Props) -> ok.
 notify(Type, Props) -> notify(Type, Props, none).
 
 notify(Type, Props, Ref) ->
-    gen_event:notify(?MODULE, #event{type      = Type,
-                                     props     = Props,
-                                     reference = Ref,
-                                     timestamp = os:timestamp()}).
+    gen_event:notify(?MODULE, event_cons(Type, Props, Ref)).
+
+sync_notify(Type, Props) -> sync_notify(Type, Props, none).
+
+sync_notify(Type, Props, Ref) ->
+    gen_event:sync_notify(?MODULE, event_cons(Type, Props, Ref)).
+
+event_cons(Type, Props, Ref) ->
+    #event{type      = Type,
+           props     = Props,
+           reference = Ref,
+           timestamp = os:timestamp()}.
+
index 4d4a2a5871756c32b8aef4510f5219d4bb19e4dc..459334455f749709fa44e8d77603d75d28c9e7cb 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange).
 
 -export([recover/0, policy_changed/2, callback/4, declare/6,
          assert_equivalence/6, assert_args_equivalence/2, check_type/1,
-         lookup/1, lookup_or_die/1, list/1, lookup_scratch/2, update_scratch/3,
+         lookup/1, lookup_or_die/1, list/0, list/1, lookup_scratch/2,
+         update_scratch/3, update_decorators/1, immutable/1,
          info_keys/0, info/1, info/2, info_all/1, info_all/2,
          route/2, delete/2, validate_binding/2]).
 %% these must be run inside a mnesia tx
--export([maybe_auto_delete/1, serial/1, peek_serial/1, update/2]).
+-export([maybe_auto_delete/2, serial/1, peek_serial/1, update/2]).
 
 %%----------------------------------------------------------------------------
 
@@ -61,6 +62,7 @@
 -spec(lookup_or_die/1 ::
         (name()) -> rabbit_types:exchange() |
                     rabbit_types:channel_exit()).
+-spec(list/0 :: () -> [rabbit_types:exchange()]).
 -spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]).
 -spec(lookup_scratch/2 :: (name(), atom()) ->
                                rabbit_types:ok(term()) |
@@ -70,6 +72,8 @@
         (name(),
          fun((rabbit_types:exchange()) -> rabbit_types:exchange()))
          -> not_found | rabbit_types:exchange()).
+-spec(update_decorators/1 :: (name()) -> 'ok').
+-spec(immutable/1 :: (rabbit_types:exchange()) -> rabbit_types:exchange()).
 -spec(info_keys/0 :: () -> rabbit_types:info_keys()).
 -spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()).
 -spec(info/2 ::
@@ -86,8 +90,8 @@
 -spec(validate_binding/2 ::
         (rabbit_types:exchange(), rabbit_types:binding())
         -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]})).
--spec(maybe_auto_delete/1::
-        (rabbit_types:exchange())
+-spec(maybe_auto_delete/2::
+        (rabbit_types:exchange(), boolean())
         -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}).
 -spec(serial/1 :: (rabbit_types:exchange()) ->
                        fun((boolean()) -> 'none' | pos_integer())).
@@ -106,24 +110,15 @@ recover() ->
                    mnesia:read({rabbit_exchange, XName}) =:= []
            end,
            fun (X, Tx) ->
-                   case Tx of
-                       true  -> store(X);
-                       false -> ok
-                   end,
-                   callback(X, create, map_create_tx(Tx), [X])
+                   X1 = case Tx of
+                            true  -> store_ram(X);
+                            false -> rabbit_exchange_decorator:set(X)
+                        end,
+                   callback(X1, create, map_create_tx(Tx), [X1])
            end,
            rabbit_durable_exchange),
-    report_missing_decorators(Xs),
     [XName || #exchange{name = XName} <- Xs].
 
-report_missing_decorators(Xs) ->
-    Mods = lists:usort(lists:append([rabbit_exchange_decorator:select(raw, D) ||
-                                     #exchange{decorators = D} <- Xs])),
-    case [M || M <- Mods, code:which(M) =:= non_existing] of
-        [] -> ok;
-        M  -> rabbit_log:warning("Missing exchange decorators: ~p~n", [M])
-    end.
-
 callback(X = #exchange{type       = XType,
                        decorators = Decorators}, Fun, Serial0, Args) ->
     Serial = if is_function(Serial0) -> Serial0;
@@ -158,12 +153,13 @@ serial(#exchange{name = XName} = X) ->
     end.
 
 declare(XName, Type, Durable, AutoDelete, Internal, Args) ->
-    X = rabbit_policy:set(#exchange{name        = XName,
-                                    type        = Type,
-                                    durable     = Durable,
-                                    auto_delete = AutoDelete,
-                                    internal    = Internal,
-                                    arguments   = Args}),
+    X = rabbit_exchange_decorator:set(
+          rabbit_policy:set(#exchange{name        = XName,
+                                      type        = Type,
+                                      durable     = Durable,
+                                      auto_delete = AutoDelete,
+                                      internal    = Internal,
+                                      arguments   = Args})),
     XT = type_to_module(Type),
     %% We want to upset things if it isn't ok
     ok = XT:validate(X),
@@ -171,13 +167,7 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) ->
       fun () ->
               case mnesia:wread({rabbit_exchange, XName}) of
                   [] ->
-                      store(X),
-                      ok = case Durable of
-                               true  -> mnesia:write(rabbit_durable_exchange,
-                                                     X, write);
-                               false -> ok
-                           end,
-                      {new, X};
+                      {new, store(X)};
                   [ExistingX] ->
                       {existing, ExistingX}
               end
@@ -195,7 +185,19 @@ declare(XName, Type, Durable, AutoDelete, Internal, Args) ->
 map_create_tx(true)  -> transaction;
 map_create_tx(false) -> none.
 
-store(X) -> ok = mnesia:write(rabbit_exchange, X, write).
+
+store(X = #exchange{durable = true}) ->
+    mnesia:write(rabbit_durable_exchange, X#exchange{decorators = undefined},
+                 write),
+    store_ram(X);
+store(X = #exchange{durable = false}) ->
+    store_ram(X).
+
+store_ram(X) ->
+    X1 = rabbit_exchange_decorator:set(X),
+    ok = mnesia:write(rabbit_exchange, rabbit_exchange_decorator:set(X1),
+                      write),
+    X1.
 
 %% Used with binaries sent over the wire; the type may not exist.
 check_type(TypeBin) ->
@@ -212,19 +214,18 @@ check_type(TypeBin) ->
             end
     end.
 
-assert_equivalence(X = #exchange{ durable     = Durable,
+assert_equivalence(X = #exchange{ name        = XName,
+                                  durable     = Durable,
                                   auto_delete = AutoDelete,
                                   internal    = Internal,
                                   type        = Type},
-                   Type, Durable, AutoDelete, Internal, RequiredArgs) ->
-    (type_to_module(Type)):assert_args_equivalence(X, RequiredArgs);
-assert_equivalence(#exchange{ name = Name },
-                   _Type, _Durable, _Internal, _AutoDelete, _Args) ->
-    rabbit_misc:protocol_error(
-      precondition_failed,
-      "cannot redeclare ~s with different type, durable, "
-      "internal or autodelete value",
-      [rabbit_misc:rs(Name)]).
+                   ReqType, ReqDurable, ReqAutoDelete, ReqInternal, ReqArgs) ->
+    AFE = fun rabbit_misc:assert_field_equivalence/4,
+    AFE(Type,       ReqType,       XName, type),
+    AFE(Durable,    ReqDurable,    XName, durable),
+    AFE(AutoDelete, ReqAutoDelete, XName, auto_delete),
+    AFE(Internal,   ReqInternal,   XName, internal),
+    (type_to_module(Type)):assert_args_equivalence(X, ReqArgs).
 
 assert_args_equivalence(#exchange{ name = Name, arguments = Args },
                         RequiredArgs) ->
@@ -243,6 +244,8 @@ lookup_or_die(Name) ->
         {error, not_found} -> rabbit_misc:not_found(Name)
     end.
 
+list() -> mnesia:dirty_match_object(rabbit_exchange, #exchange{_ = '_'}).
+
 %% Not dirty_match_object since that would not be transactional when used in a
 %% tx context
 list(VHostPath) ->
@@ -287,20 +290,27 @@ update_scratch(Name, App, Fun) ->
               ok
       end).
 
+update_decorators(Name) ->
+    rabbit_misc:execute_mnesia_transaction(
+      fun() ->
+              case mnesia:wread({rabbit_exchange, Name}) of
+                  [X] -> store_ram(X),
+                         ok;
+                  []  -> ok
+              end
+      end).
+
 update(Name, Fun) ->
     case mnesia:wread({rabbit_exchange, Name}) of
-        [X = #exchange{durable = Durable}] ->
-            X1 = Fun(X),
-            ok = mnesia:write(rabbit_exchange, X1, write),
-            case Durable of
-                true -> ok = mnesia:write(rabbit_durable_exchange, X1, write);
-                _    -> ok
-            end,
-            X1;
-        [] ->
-            not_found
+        [X] -> X1 = Fun(X),
+               store(X1);
+        []  -> not_found
     end.
 
+immutable(X) -> X#exchange{scratches  = none,
+                           policy     = none,
+                           decorators = none}.
+
 info_keys() -> ?INFO_KEYS.
 
 map(VHostPath, F) ->
@@ -333,14 +343,21 @@ info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end).
 route(#exchange{name = #resource{virtual_host = VHost, name = RName} = XName,
                 decorators = Decorators} = X,
       #delivery{message = #basic_message{routing_keys = RKs}} = Delivery) ->
-    case {RName, rabbit_exchange_decorator:select(route, Decorators)} of
-        {<<"">>, []} ->
-            %% Optimisation
-            [rabbit_misc:r(VHost, queue, RK) || RK <- lists:usort(RKs)];
-        {_, SelectedDecorators} ->
-            lists:usort(route1(Delivery, SelectedDecorators, {[X], XName, []}))
+    case RName of
+        <<>> ->
+            RKsSorted = lists:usort(RKs),
+            [rabbit_channel:deliver_reply(RK, Delivery) ||
+                RK <- RKsSorted, virtual_reply_queue(RK)],
+            [rabbit_misc:r(VHost, queue, RK) || RK <- RKsSorted,
+                                                not virtual_reply_queue(RK)];
+        _ ->
+            Decs = rabbit_exchange_decorator:select(route, Decorators),
+            lists:usort(route1(Delivery, Decs, {[X], XName, []}))
     end.
 
+virtual_reply_queue(<<"amq.rabbitmq.reply-to.", _/binary>>) -> true;
+virtual_reply_queue(_)                                      -> false.
+
 route1(_, _, {[], _, QNames}) ->
     QNames;
 route1(Delivery, Decorators,
@@ -400,13 +417,13 @@ call_with_exchange(XName, Fun) ->
 
 delete(XName, IfUnused) ->
     Fun = case IfUnused of
-              true  -> fun conditional_delete/1;
-              false -> fun unconditional_delete/1
+              true  -> fun conditional_delete/2;
+              false -> fun unconditional_delete/2
           end,
     call_with_exchange(
       XName,
       fun (X) ->
-              case Fun(X) of
+              case Fun(X, false) of
                   {deleted, X, Bs, Deletions} ->
                       rabbit_binding:process_deletions(
                         rabbit_binding:add_deletion(
@@ -420,21 +437,21 @@ validate_binding(X = #exchange{type = XType}, Binding) ->
     Module = type_to_module(XType),
     Module:validate_binding(X, Binding).
 
-maybe_auto_delete(#exchange{auto_delete = false}) ->
+maybe_auto_delete(#exchange{auto_delete = false}, _OnlyDurable) ->
     not_deleted;
-maybe_auto_delete(#exchange{auto_delete = true} = X) ->
-    case conditional_delete(X) of
+maybe_auto_delete(#exchange{auto_delete = true} = X, OnlyDurable) ->
+    case conditional_delete(X, OnlyDurable) of
         {error, in_use}             -> not_deleted;
         {deleted, X, [], Deletions} -> {deleted, Deletions}
     end.
 
-conditional_delete(X = #exchange{name = XName}) ->
+conditional_delete(X = #exchange{name = XName}, OnlyDurable) ->
     case rabbit_binding:has_for_source(XName) of
-        false  -> unconditional_delete(X);
+        false  -> unconditional_delete(X, OnlyDurable);
         true   -> {error, in_use}
     end.
 
-unconditional_delete(X = #exchange{name = XName}) ->
+unconditional_delete(X = #exchange{name = XName}, OnlyDurable) ->
     %% this 'guarded' delete prevents unnecessary writes to the mnesia
     %% disk log
     case mnesia:wread({rabbit_durable_exchange, XName}) of
@@ -444,7 +461,8 @@ unconditional_delete(X = #exchange{name = XName}) ->
     ok = mnesia:delete({rabbit_exchange, XName}),
     ok = mnesia:delete({rabbit_exchange_serial, XName}),
     Bindings = rabbit_binding:remove_for_source(XName),
-    {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}.
+    {deleted, X, Bindings, rabbit_binding:remove_for_destination(
+                             XName, OnlyDurable)}.
 
 next_serial(XName) ->
     Serial = peek_serial(XName, write),
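
For context only, not part of the upstream patch: the reworked route/2 above special-cases the default exchange, handing routing keys that name a direct reply-to pseudo-queue to rabbit_channel:deliver_reply/2 instead of resolving them to queue records, and virtual_reply_queue/1 is a plain binary prefix match. A minimal Erlang shell sketch with made-up routing keys:

    1> F = fun(<<"amq.rabbitmq.reply-to.", _/binary>>) -> true; (_) -> false end.
    2> F(<<"amq.rabbitmq.reply-to.g1hkZXJw">>).
    true
    3> F(<<"some.ordinary.key">>).
    false
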
index 2f056b1bfe93757c128871f7df895335eb736a8a..7c5bfdf9131703d1d06ef142cc7e55803598ad75 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange_decorator).
 
 -include("rabbit.hrl").
 
--export([select/2, set/1]).
+-export([select/2, set/1, register/2, unregister/1]).
 
 %% This is like an exchange type except that:
 %%
@@ -104,3 +104,25 @@ list() -> [M || {_, M} <- rabbit_registry:lookup_all(exchange_decorator)].
 
 cons_if_eq(Select,  Select, Item,  List) -> [Item | List];
 cons_if_eq(_Select, _Other, _Item, List) -> List.
+
+register(TypeName, ModuleName) ->
+    rabbit_registry:register(exchange_decorator, TypeName, ModuleName),
+    [maybe_recover(X) || X <- rabbit_exchange:list()],
+    ok.
+
+unregister(TypeName) ->
+    rabbit_registry:unregister(exchange_decorator, TypeName),
+    [maybe_recover(X) || X <- rabbit_exchange:list()],
+    ok.
+
+maybe_recover(X = #exchange{name       = Name,
+                            decorators = Decs}) ->
+    #exchange{decorators = Decs1} = set(X),
+    Old = lists:sort(select(all, Decs)),
+    New = lists:sort(select(all, Decs1)),
+    case New of
+        Old -> ok;
+        _   -> %% TODO create a tx here for non-federation decorators
+               [M:create(none, X) || M <- New -- Old],
+               rabbit_exchange:update_decorators(Name)
+    end.
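
Aside, not from the patch itself: the new register/2 and unregister/1 let a plugin attach or detach an exchange decorator on a running node; after updating rabbit_registry they re-scan every exchange and, where the selected decorator set changed, call create/2 on the newly selected decorators and refresh the stored records via rabbit_exchange:update_decorators/1. A hedged sketch of the call sites, with a placeholder decorator name and callback module:

    %% <<"example">> and my_decorator are placeholders; the module must
    %% implement the exchange_decorator behaviour for registration to succeed.
    1> ok = rabbit_exchange_decorator:register(<<"example">>, my_decorator).
    2> ok = rabbit_exchange_decorator:unregister(<<"example">>).
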
index 4dd3442838036d4cafe94fa0f7fefdf8642913ec..92c1de6c2183790624eed0bede9be3e475e4f0c6 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange_type).
index 8a240a8bf27a110e5ea2f6c4deaa17faa56bdbab..5c4ab3cf464bb62c85ed6e4bb9c0d2edf3b70bc3 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange_type_direct).
index 3a1f0717bc24a821b3924b0071a77a74415c8c68..67d88d61af94c978e282632d74ed1a8ac67d95f3 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange_type_fanout).
index afce57d94b24d5f4bf93d8ac25dfbe205abe8e35..b56efdfaf6d56bf3ca8e0faeadace8f346e8befa 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange_type_headers).
index 457f184a70860ee1ccc457ec56a0b1ae8443b7ba..283bd494ed58f1c29dd31aa76c2074e4bf828619 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange_type_invalid).
index af00fe88dd56a5d4be82117736a80c8de0f0f0aa..afbfc652b1b4d7370ef33051f4d7ceab3164ae62 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_exchange_type_topic).
index 81a617a821944d91145b2c66d7d87a59c5eae3fa..6c4f0e5ccde07e8190908ad124d351a5c619f118 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_file).
index 7f6989d4e9b153879b827f56994ef44c2174514e..d5f46e7034e1a4f9a26ee9435cd5392b4b4313c2 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% TODO auto-generate
index 5307d7e25780c8d5eac59601ad74db221eb5c8e5..0a9fe4d873282b820ae04d3b6c8ff212ca36859b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_guid).
index 36b0baa5614335ca8666dc16f7d822a58561b806..993076770fdf4726a0ef4cb5d22d87a03e845ffc 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_heartbeat).
index b17b7de98b9ef1bc00b5e279d6282722191fa4ee..5f148a5284f74660e87de40a18033169099a379c 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% The purpose of the limiter is to stem the flow of messages from
@@ -22,7 +22,7 @@
 %%
 %% Each channel has an associated limiter process, created with
 %% start_link/1, which it passes to queues on consumer creation with
-%% rabbit_amqqueue:basic_consume/9, and rabbit_amqqueue:basic_get/4.
+%% rabbit_amqqueue:basic_consume/10, and rabbit_amqqueue:basic_get/4.
 %% The latter isn't strictly necessary, since basic.get is not
 %% subject to limiting, but it means that whenever a queue knows about
 %% a channel, it also knows about its limiter, which is less fiddly.
 
 -record(lim, {prefetch_count = 0,
               ch_pid,
+              %% 'Notify' is a boolean that indicates whether a queue should be
+              %% notified of a change in the limit or volume that may allow it to
+              %% deliver more messages via the limiter's channel.
               queues = orddict:new(), % QPid -> {MonitorRef, Notify}
               volume = 0}).
-%% 'Notify' is a boolean that indicates whether a queue should be
-%% notified of a change in the limit or volume that may allow it to
-%% deliver more messages via the limiter's channel.
 
+%% mode is of type credit_mode()
 -record(credit, {credit = 0, mode}).
 
 %%----------------------------------------------------------------------------
index f4df0e76d5bc41f70603283a42265db43f047ccc..083204df058da22347a1e2bcd52ab98d19f16b4b 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_log).
 
--behaviour(gen_server).
-
--export([start_link/0]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
-         terminate/2, code_change/3]).
-
--export([log/3, log/4, info/1, info/2, warning/1, warning/2, error/1, error/2]).
-
--define(SERVER, ?MODULE).
+-export([log/3, log/4, debug/1, debug/2, info/1, info/2, warning/1,
+         warning/2, error/1, error/2]).
+-export([with_local_io/1]).
 
 %%----------------------------------------------------------------------------
 
 -export_type([level/0]).
 
 -type(category() :: atom()).
--type(level() :: 'info' | 'warning' | 'error').
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
+-type(level() :: 'debug' | 'info' | 'warning' | 'error').
 
 -spec(log/3 :: (category(), level(), string()) -> 'ok').
 -spec(log/4 :: (category(), level(), string(), [any()]) -> 'ok').
 
+-spec(debug/1   :: (string()) -> 'ok').
+-spec(debug/2   :: (string(), [any()]) -> 'ok').
 -spec(info/1    :: (string()) -> 'ok').
 -spec(info/2    :: (string(), [any()]) -> 'ok').
 -spec(warning/1 :: (string()) -> 'ok').
 -spec(error/1   :: (string()) -> 'ok').
 -spec(error/2   :: (string(), [any()]) -> 'ok').
 
+-spec(with_local_io/1 :: (fun (() -> A)) -> A).
+
 -endif.
 
 %%----------------------------------------------------------------------------
-start_link() ->
-    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
 
 log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).
 
 log(Category, Level, Fmt, Args) when is_list(Args) ->
-    gen_server:cast(?SERVER, {log, Category, Level, Fmt, Args}).
-
+    case level(Level) =< catlevel(Category) of
+        false -> ok;
+        true  -> F = case Level of
+                         debug   -> fun error_logger:info_msg/2;
+                         info    -> fun error_logger:info_msg/2;
+                         warning -> fun error_logger:warning_msg/2;
+                         error   -> fun error_logger:error_msg/2
+                     end,
+                 with_local_io(fun () -> F(Fmt, Args) end)
+    end.
+
+debug(Fmt)         -> log(default, debug,    Fmt).
+debug(Fmt, Args)   -> log(default, debug,    Fmt, Args).
 info(Fmt)          -> log(default, info,    Fmt).
 info(Fmt, Args)    -> log(default, info,    Fmt, Args).
 warning(Fmt)       -> log(default, warning, Fmt).
@@ -66,45 +70,36 @@ warning(Fmt, Args) -> log(default, warning, Fmt, Args).
 error(Fmt)         -> log(default, error,   Fmt).
 error(Fmt, Args)   -> log(default, error,   Fmt, Args).
 
-%%--------------------------------------------------------------------
-
-init([]) ->
-    {ok, CatLevelList} = application:get_env(log_levels),
-    CatLevels = [{Cat, level(Level)} || {Cat, Level} <- CatLevelList],
-    {ok, orddict:from_list(CatLevels)}.
-
-handle_call(_Request, _From, State) ->
-    {noreply, State}.
-
-handle_cast({log, Category, Level, Fmt, Args}, CatLevels) ->
-    CatLevel = case orddict:find(Category, CatLevels) of
-                   {ok, L} -> L;
-                   error   -> level(info)
-               end,
-    case level(Level) =< CatLevel of
-        false -> ok;
-        true  -> (case Level of
-                      info    -> fun error_logger:info_msg/2;
-                      warning -> fun error_logger:warning_msg/2;
-                      error   -> fun error_logger:error_msg/2
-                  end)(Fmt, Args)
-    end,
-    {noreply, CatLevels};
-handle_cast(_Msg, State) ->
-    {noreply, State}.
-
-handle_info(_Info, State) ->
-    {noreply, State}.
-
-terminate(_Reason, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
+catlevel(Category) ->
+    %% We can get here as part of rabbitmqctl when it is impersonating
+    %% a node; in which case the env will not be defined.
+    CatLevelList = case application:get_env(rabbit, log_levels) of
+                       {ok, L}   -> L;
+                       undefined -> []
+                   end,
+    level(proplists:get_value(Category, CatLevelList, info)).
 
 %%--------------------------------------------------------------------
 
+level(debug)   -> 4;
 level(info)    -> 3;
 level(warning) -> 2;
+level(warn)    -> 2;
 level(error)   -> 1;
 level(none)    -> 0.
+
+%% Execute Fun using the IO system of the local node (i.e. the node on
+%% which the code is executing). Since this is invoked for every log
+%% message, we try to avoid unnecessarily churning group_leader/1.
+with_local_io(Fun) ->
+    GL = group_leader(),
+    Node = node(),
+    case node(GL) of
+        Node -> Fun();
+        _    -> group_leader(whereis(user), self()),
+                try
+                    Fun()
+                after
+                    group_leader(GL, self())
+                end
+    end.
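
For illustration only, not part of the patch: rabbit_log stops being a gen_server here; log/4 filters by the per-category {rabbit, log_levels} setting and writes synchronously through error_logger, wrapped in with_local_io/1 so output is produced on the local node even when the group leader is remote (for instance when invoked via rabbitmqctl). The filter is a plain numeric comparison, so at the default category level of info, debug messages are dropped:

    1> Level = fun(debug) -> 4; (info) -> 3; (warning) -> 2; (error) -> 1; (none) -> 0 end.
    2> Level(debug)   =< Level(info).   %% debug is filtered out at the default level
    false
    3> Level(warning) =< Level(info).   %% warnings pass
    true
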
index 451ee1f44354d237cadaea87b3461a4bc1223cda..7aa29fc42332d973adcf5cd28e648c70fba734ef 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 
@@ -25,7 +25,7 @@
 -behaviour(gen_server2).
 
 -export([start_link/0, register/2, deregister/1,
-         report_ram_duration/2, stop/0, conserve_resources/3]).
+         report_ram_duration/2, stop/0, conserve_resources/3, memory_use/1]).
 
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
          terminate/2, code_change/3]).
@@ -92,6 +92,19 @@ conserve_resources(Pid, disk, Conserve) ->
 conserve_resources(_Pid, _Source, _Conserve) ->
     ok.
 
+memory_use(bytes) ->
+    MemoryLimit = vm_memory_monitor:get_memory_limit(),
+    {erlang:memory(total), case MemoryLimit > 0.0 of
+                               true  -> MemoryLimit;
+                               false -> infinity
+                           end};
+memory_use(ratio) ->
+    MemoryLimit = vm_memory_monitor:get_memory_limit(),
+    case MemoryLimit > 0.0 of
+        true  -> erlang:memory(total) / MemoryLimit;
+        false -> infinity
+    end.
+
 %%----------------------------------------------------------------------------
 %% Gen_server callbacks
 %%----------------------------------------------------------------------------
@@ -223,11 +236,7 @@ desired_duration_average(#state{disk_alarm           = false,
                                 queue_duration_count = Count}) ->
     {ok, LimitThreshold} =
         application:get_env(rabbit, vm_memory_high_watermark_paging_ratio),
-    MemoryLimit = vm_memory_monitor:get_memory_limit(),
-    MemoryRatio = case MemoryLimit > 0.0 of
-                      true  -> erlang:memory(total) / MemoryLimit;
-                      false -> infinity
-                  end,
+    MemoryRatio = memory_use(ratio),
     if MemoryRatio =:= infinity ->
             0.0;
        MemoryRatio < LimitThreshold orelse Count == 0 ->
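
Aside, not part of the patch: memory_use/1 exposes the same reading that desired_duration_average/1 now uses, either as {UsedBytes, LimitBytes} or as a ratio, and degenerates to infinity when no memory limit is in force rather than dividing by zero. A hedged sketch, assuming an Erlang shell on a running node:

    %% Both forms are side-effect free reads of the current memory situation.
    1> {Used, Limit} = rabbit_memory_monitor:memory_use(bytes).
    2> rabbit_memory_monitor:memory_use(ratio).
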
index 23718da18b47f4f450fa7ba4fb6148324335edd7..77a145a9cfdcbbdf688f83a1c60f27b8fb91ee12 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_coordinator).
@@ -19,9 +19,9 @@
 -export([start_link/4, get_gm/1, ensure_monitoring/2]).
 
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
-         code_change/3]).
+         code_change/3, handle_pre_hibernate/1]).
 
--export([joined/2, members_changed/3, handle_msg/3]).
+-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
 
 -behaviour(gen_server2).
 -behaviour(gm).
@@ -353,9 +353,10 @@ handle_cast({gm_deaths, DeadGMPids},
   when node(MPid) =:= node() ->
     case rabbit_mirror_queue_misc:remove_from_queue(
            QueueName, MPid, DeadGMPids) of
-        {ok, MPid, DeadPids} ->
+        {ok, MPid, DeadPids, ExtraNodes} ->
             rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName,
                                                    DeadPids),
+            rabbit_mirror_queue_misc:add_mirrors(QueueName, ExtraNodes, async),
             noreply(State);
         {error, not_found} ->
             {stop, normal, State}
@@ -384,15 +385,20 @@ handle_info(Msg, State) ->
     {stop, {unexpected_info, Msg}, State}.
 
 terminate(_Reason, #state{}) ->
-    %% gen_server case
-    ok;
-terminate([_CPid], _Reason) ->
-    %% gm case
     ok.
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
+handle_pre_hibernate(State = #state { gm = GM }) ->
+    %% Since GM notifications of deaths are lazy we might not get a
+    %% timely notification of slave death if policy changes when
+    %% everything is idle. So cause some activity just before we
+    %% sleep. This won't cause us to go into perpetual motion as the
+    %% heartbeat does not wake up coordinator or slaves.
+    gm:broadcast(GM, hibernate_heartbeat),
+    {hibernate, State}.
+
 %% ---------------------------------------------------------------------------
 %% GM
 %% ---------------------------------------------------------------------------
@@ -416,6 +422,9 @@ handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
 handle_msg([_CPid], _From, _Msg) ->
     ok.
 
+handle_terminate([_CPid], _Reason) ->
+    ok.
+
 %% ---------------------------------------------------------------------------
 %% Others
 %% ---------------------------------------------------------------------------
index 2b16b9118d404c6a1cc8b79a34ee9b2dab9cce65..7890128872452635b2ee4a97c1330176edd18347 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_master).
 
 -export([init/3, terminate/2, delete_and_terminate/2,
-         purge/1, purge_acks/1, publish/5, publish_delivered/4,
-         discard/3, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
+         purge/1, purge_acks/1, publish/6, publish_delivered/5,
+         discard/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
          len/1, is_empty/1, depth/1, drain_confirmed/1,
          dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1,
          needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1,
-         msg_rates/1, status/1, invoke/3, is_duplicate/2]).
+         msg_rates/1, info/2, invoke/3, is_duplicate/2]).
 
--export([start/1, stop/0]).
+-export([start/1, stop/0, delete_crashed/1]).
 
 -export([promote_backing_queue_state/8, sender_death_fun/0, depth_fun/0]).
 
                                  coordinator         :: pid(),
                                  backing_queue       :: atom(),
                                  backing_queue_state :: any(),
-                                 seen_status         :: dict(),
+                                 seen_status         :: dict:dict(),
                                  confirmed           :: [rabbit_guid:guid()],
-                                 known_senders       :: set()
+                                 known_senders       :: sets:set()
                                }).
 
 -spec(promote_backing_queue_state/8 ::
-        (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()], dict(),
-         [pid()]) -> master_state()).
+        (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()],
+         dict:dict(), [pid()]) -> master_state()).
 -spec(sender_death_fun/0 :: () -> death_fun()).
 -spec(depth_fun/0 :: () -> depth_fun()).
 -spec(init_with_existing_bq/3 :: (rabbit_types:amqqueue(), atom(), any()) ->
@@ -90,6 +90,9 @@ stop() ->
     %% Same as start/1.
     exit({not_valid_for_generic_backing_queue, ?MODULE}).
 
+delete_crashed(_QName) ->
+    exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
 init(Q, Recover, AsyncCallback) ->
     {ok, BQ} = application:get_env(backing_queue_module),
     BQS = BQ:init(Q, Recover, AsyncCallback),
@@ -107,7 +110,8 @@ init_with_existing_bq(Q = #amqqueue{name = QName}, BQ, BQS) ->
                    [Q1 = #amqqueue{gm_pids = GMPids}]
                        = mnesia:read({rabbit_queue, QName}),
                    ok = rabbit_amqqueue:store_queue(
-                          Q1#amqqueue{gm_pids = [{GM, Self} | GMPids]})
+                          Q1#amqqueue{gm_pids = [{GM, Self} | GMPids],
+                                      state   = live})
            end),
     {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q),
     %% We need synchronous add here (i.e. do not return until the
@@ -170,10 +174,24 @@ terminate({shutdown, dropped} = Reason,
     State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)};
 
 terminate(Reason,
-          State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+          State = #state { name                = QName,
+                           backing_queue       = BQ,
+                           backing_queue_state = BQS }) ->
     %% Backing queue termination. The queue is going down but
     %% shouldn't be deleted. Most likely safe shutdown of this
-    %% node. Thus just let some other slave take over.
+    %% node.
+    {ok, Q = #amqqueue{sync_slave_pids = SSPids}} =
+        rabbit_amqqueue:lookup(QName),
+    case SSPids =:= [] andalso
+        rabbit_policy:get(<<"ha-promote-on-shutdown">>, Q) =/= <<"always">> of
+        true  -> %% Remove the whole queue to avoid data loss
+                 rabbit_mirror_queue_misc:log_warning(
+                   QName, "Stopping all nodes on master shutdown since no "
+                   "synchronised slave is available~n", []),
+                 stop_all_slaves(Reason, State);
+        false -> %% Just let some other slave take over.
+                 ok
+    end,
     State #state { backing_queue_state = BQ:terminate(Reason, BQS) }.
 
 delete_and_terminate(Reason, State = #state { backing_queue       = BQ,
@@ -181,11 +199,17 @@ delete_and_terminate(Reason, State = #state { backing_queue       = BQ,
     stop_all_slaves(Reason, State),
     State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}.
 
-stop_all_slaves(Reason, #state{name = QName, gm   = GM}) ->
+stop_all_slaves(Reason, #state{name = QName, gm = GM}) ->
     {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(QName),
-    MRefs = [erlang:monitor(process, Pid) || Pid <- [GM | SPids]],
+    PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- [GM | SPids]],
     ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
-    [receive {'DOWN', MRef, process, _Pid, _Info} -> ok end || MRef <- MRefs],
+    %% It's possible that we could be partitioned from some slaves
+    %% between the lookup and the broadcast, in which case we could
+    %% monitor them but they would not have received the GM
+    %% message. So only wait for slaves which are still
+    %% not-partitioned.
+    [receive {'DOWN', MRef, process, _Pid, _Info} -> ok end
+     || {Pid, MRef} <- PidsMRefs, rabbit_mnesia:on_running_node(Pid)],
     %% Normally when we remove a slave another slave or master will
     %% notice and update Mnesia. But we just removed them all, and
     %% have stopped listening ourselves. So manually clean up.
@@ -206,37 +230,38 @@ purge(State = #state { gm                  = GM,
 
 purge_acks(_State) -> exit({not_implemented, {?MODULE, purge_acks}}).
 
-publish(Msg = #basic_message { id = MsgId }, MsgProps, IsDelivered, ChPid,
+publish(Msg = #basic_message { id = MsgId }, MsgProps, IsDelivered, ChPid, Flow,
         State = #state { gm                  = GM,
                          seen_status         = SS,
                          backing_queue       = BQ,
                          backing_queue_state = BQS }) ->
     false = dict:is_key(MsgId, SS), %% ASSERTION
-    ok = gm:broadcast(GM, {publish, ChPid, MsgProps, Msg},
+    ok = gm:broadcast(GM, {publish, ChPid, Flow, MsgProps, Msg},
                       rabbit_basic:msg_size(Msg)),
-    BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, BQS),
+    BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS),
     ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
 
 publish_delivered(Msg = #basic_message { id = MsgId }, MsgProps,
-                  ChPid, State = #state { gm                  = GM,
-                                          seen_status         = SS,
-                                          backing_queue       = BQ,
-                                          backing_queue_state = BQS }) ->
+                  ChPid, Flow, State = #state { gm                  = GM,
+                                                seen_status         = SS,
+                                                backing_queue       = BQ,
+                                                backing_queue_state = BQS }) ->
     false = dict:is_key(MsgId, SS), %% ASSERTION
-    ok = gm:broadcast(GM, {publish_delivered, ChPid, MsgProps, Msg},
+    ok = gm:broadcast(GM, {publish_delivered, ChPid, Flow, MsgProps, Msg},
                       rabbit_basic:msg_size(Msg)),
-    {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, BQS),
+    {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
     State1 = State #state { backing_queue_state = BQS1 },
     {AckTag, ensure_monitoring(ChPid, State1)}.
 
-discard(MsgId, ChPid, State = #state { gm                  = GM,
-                                       backing_queue       = BQ,
-                                       backing_queue_state = BQS,
-                                       seen_status         = SS }) ->
+discard(MsgId, ChPid, Flow, State = #state { gm                  = GM,
+                                             backing_queue       = BQ,
+                                             backing_queue_state = BQS,
+                                             seen_status         = SS }) ->
     false = dict:is_key(MsgId, SS), %% ASSERTION
-    ok = gm:broadcast(GM, {discard, ChPid, MsgId}),
-    ensure_monitoring(ChPid, State #state { backing_queue_state =
-                                                BQ:discard(MsgId, ChPid, BQS) }).
+    ok = gm:broadcast(GM, {discard, ChPid, Flow, MsgId}),
+    ensure_monitoring(ChPid,
+                      State #state { backing_queue_state =
+                                         BQ:discard(MsgId, ChPid, Flow, BQS) }).
 
 dropwhile(Pred, State = #state{backing_queue       = BQ,
                                backing_queue_state = BQS }) ->
@@ -360,10 +385,13 @@ resume(State = #state { backing_queue       = BQ,
 msg_rates(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
     BQ:msg_rates(BQS).
 
-status(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
-    BQ:status(BQS) ++
+info(backing_queue_status,
+     State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+    BQ:info(backing_queue_status, BQS) ++
         [ {mirror_seen,    dict:size(State #state.seen_status)},
-          {mirror_senders, sets:size(State #state.known_senders)} ].
+          {mirror_senders, sets:size(State #state.known_senders)} ];
+info(Item, #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+    BQ:info(Item, BQS).
 
 invoke(?MODULE, Fun, State) ->
     Fun(?MODULE, State);
index b0f092a9a0228a8ee8762230a34a8196b36de3c4..fee890476e449fbe595d1fcefb58be12378401a7 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_misc).
 
 -include("rabbit.hrl").
 
--rabbit_boot_step({?MODULE,
-                   [{description, "HA policy validation"},
-                    {mfa, {rabbit_registry, register,
-                           [policy_validator, <<"ha-mode">>, ?MODULE]}},
-                    {mfa, {rabbit_registry, register,
-                           [policy_validator, <<"ha-params">>, ?MODULE]}},
-                    {mfa, {rabbit_registry, register,
-                           [policy_validator, <<"ha-sync-mode">>, ?MODULE]}},
-                    {requires, rabbit_registry},
-                    {enables, recovery}]}).
+-rabbit_boot_step(
+   {?MODULE,
+    [{description, "HA policy validation"},
+     {mfa, {rabbit_registry, register,
+            [policy_validator, <<"ha-mode">>, ?MODULE]}},
+     {mfa, {rabbit_registry, register,
+            [policy_validator, <<"ha-params">>, ?MODULE]}},
+     {mfa, {rabbit_registry, register,
+            [policy_validator, <<"ha-sync-mode">>, ?MODULE]}},
+     {mfa, {rabbit_registry, register,
+            [policy_validator, <<"ha-promote-on-shutdown">>, ?MODULE]}},
+     {requires, rabbit_registry},
+     {enables, recovery}]}).
 
 %%----------------------------------------------------------------------------
 
@@ -46,7 +49,7 @@
 
 -spec(remove_from_queue/3 ::
         (rabbit_amqqueue:name(), pid(), [pid()])
-        -> {'ok', pid(), [pid()]} | {'error', 'not_found'}).
+        -> {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'}).
 -spec(on_node_up/0 :: () -> 'ok').
 -spec(add_mirrors/3 :: (rabbit_amqqueue:name(), [node()], 'sync' | 'async')
                        -> 'ok').
@@ -67,7 +70,7 @@
 
 %%----------------------------------------------------------------------------
 
-%% Returns {ok, NewMPid, DeadPids}
+%% Returns {ok, NewMPid, DeadPids, ExtraNodes}
 remove_from_queue(QueueName, Self, DeadGMPids) ->
     rabbit_misc:execute_mnesia_transaction(
       fun () ->
@@ -87,30 +90,34 @@ remove_from_queue(QueueName, Self, DeadGMPids) ->
                       Alive     = [Pid || Pid <- [QPid | SPids],
                                           lists:member(Pid, AlivePids)],
                       {QPid1, SPids1} = promote_slave(Alive),
-                      case {{QPid, SPids}, {QPid1, SPids1}} of
-                          {Same, Same} ->
-                              ok;
-                          _ when QPid =:= QPid1 orelse QPid1 =:= Self ->
-                              %% Either master hasn't changed, so
-                              %% we're ok to update mnesia; or we have
-                              %% become the master.
-                              Q1 = Q#amqqueue{pid        = QPid1,
-                                              slave_pids = SPids1,
-                                              gm_pids    = AliveGM},
-                              store_updated_slaves(Q1),
-                              %% If we add and remove nodes at the same time we
-                              %% might tell the old master we need to sync and
-                              %% then shut it down. So let's check if the new
-                              %% master needs to sync.
-                              maybe_auto_sync(Q1);
+                      Extra =
+                          case {{QPid, SPids}, {QPid1, SPids1}} of
+                              {Same, Same} ->
+                                  [];
+                              _ when QPid =:= QPid1 orelse QPid1 =:= Self ->
+                                  %% Either master hasn't changed, so
+                                  %% we're ok to update mnesia; or we have
+                                  %% become the master.
+                                  Q1 = Q#amqqueue{pid        = QPid1,
+                                                  slave_pids = SPids1,
+                                                  gm_pids    = AliveGM},
+                                  store_updated_slaves(Q1),
+                                  %% If we add and remove nodes at the
+                                  %% same time we might tell the old
+                                  %% master we need to sync and then
+                                  %% shut it down. So let's check if
+                                  %% the new master needs to sync.
+                                  maybe_auto_sync(Q1),
+                                  slaves_to_start_on_failure(Q1, DeadGMPids);
                           _ ->
-                              %% Master has changed, and we're not it.
-                              %% [1].
-                              Q1 = Q#amqqueue{slave_pids = Alive,
-                                              gm_pids    = AliveGM},
-                              store_updated_slaves(Q1)
-                      end,
-                      {ok, QPid1, DeadPids}
+                                  %% Master has changed, and we're not it.
+                                  %% [1].
+                                  Q1 = Q#amqqueue{slave_pids = Alive,
+                                                  gm_pids    = AliveGM},
+                                  store_updated_slaves(Q1),
+                                  []
+                          end,
+                      {ok, QPid1, DeadPids, Extra}
               end
       end).
 %% [1] We still update mnesia here in case the slave that is supposed
@@ -136,6 +143,17 @@ remove_from_queue(QueueName, Self, DeadGMPids) ->
 %% aforementioned restriction on updating the master pid, that pid may
 %% not be present in gm_pids, but only if said master has died.
 
+%% Sometimes a slave dying means we need to start more on other
+%% nodes - "exactly" mode can cause this to happen.
+slaves_to_start_on_failure(Q, DeadGMPids) ->
+    %% In case Mnesia has not caught up yet, filter out nodes we know
+    %% to be dead..
+    ClusterNodes = rabbit_mnesia:cluster_nodes(running) --
+        [node(P) || P <- DeadGMPids],
+    {_, OldNodes, _} = actual_queue_nodes(Q),
+    {_, NewNodes} = suggested_queue_nodes(Q, ClusterNodes),
+    NewNodes -- OldNodes.
+
 on_node_up() ->
     QNames =
         rabbit_misc:execute_mnesia_transaction(
@@ -193,31 +211,20 @@ add_mirrors(QName, Nodes, SyncMode) ->
 
 add_mirror(QName, MirrorNode, SyncMode) ->
     case rabbit_amqqueue:lookup(QName) of
-        {ok, #amqqueue { name = Name, pid = QPid, slave_pids = SPids } = Q} ->
-            case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of
-                [] ->
-                    start_child(Name, MirrorNode, Q, SyncMode);
-                [SPid] ->
-                    case rabbit_misc:is_process_alive(SPid) of
-                        true  -> {ok, already_mirrored};
-                        false -> start_child(Name, MirrorNode, Q, SyncMode)
-                    end
-            end;
+        {ok, Q} ->
+            rabbit_misc:with_exit_handler(
+              rabbit_misc:const(ok),
+              fun () ->
+                      SPid = rabbit_amqqueue_sup_sup:start_queue_process(
+                               MirrorNode, Q, slave),
+                      log_info(QName, "Adding mirror on node ~p: ~p~n",
+                               [MirrorNode, SPid]),
+                      rabbit_mirror_queue_slave:go(SPid, SyncMode)
+              end);
         {error, not_found} = E ->
             E
     end.
 
-start_child(Name, MirrorNode, Q, SyncMode) ->
-    rabbit_misc:with_exit_handler(
-      rabbit_misc:const(ok),
-      fun () ->
-              {ok, SPid} = rabbit_mirror_queue_slave_sup:start_child(
-                             MirrorNode, [Q]),
-              log_info(Name, "Adding mirror on node ~p: ~p~n",
-                       [MirrorNode, SPid]),
-              rabbit_mirror_queue_slave:go(SPid, SyncMode)
-      end).
-
 report_deaths(_MirrorPid, _IsMaster, _QueueName, []) ->
     ok;
 report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) ->
@@ -236,17 +243,39 @@ log(Level, QName, Fmt, Args) ->
     rabbit_log:log(mirroring, Level, "Mirrored ~s: " ++ Fmt,
                    [rabbit_misc:rs(QName) | Args]).
 
-store_updated_slaves(Q = #amqqueue{slave_pids      = SPids,
-                                   sync_slave_pids = SSPids}) ->
+store_updated_slaves(Q = #amqqueue{slave_pids         = SPids,
+                                   sync_slave_pids    = SSPids,
+                                   recoverable_slaves = RS}) ->
     %% TODO now that we clear sync_slave_pids in rabbit_durable_queue,
     %% do we still need this filtering?
     SSPids1 = [SSPid || SSPid <- SSPids, lists:member(SSPid, SPids)],
-    Q1 = Q#amqqueue{sync_slave_pids = SSPids1},
+    Q1 = Q#amqqueue{sync_slave_pids    = SSPids1,
+                    recoverable_slaves = update_recoverable(SPids, RS),
+                    state              = live},
     ok = rabbit_amqqueue:store_queue(Q1),
     %% Wake it up so that we emit a stats event
     rabbit_amqqueue:notify_policy_changed(Q1),
     Q1.
 
+%% Recoverable nodes are those which we could promote if the whole
+%% cluster were to suddenly stop and we then lose the master; i.e. all
+%% nodes with running slaves, and all stopped nodes which had running
+%% slaves when they were up.
+%%
+%% Therefore we aim here to add new nodes with slaves, and remove
+%% running nodes without slaves, We also try to keep the order
+%% constant, and similar to the live SPids field (i.e. oldest
+%% first). That's not necessarily optimal if nodes spend a long time
+%% down, but we don't have a good way to predict what the optimal is
+%% in that case anyway, and we assume nodes will not just be down for
+%% a long time without being removed.
+update_recoverable(SPids, RS) ->
+    SNodes = [node(SPid) || SPid <- SPids],
+    RunningNodes = rabbit_mnesia:cluster_nodes(running),
+    AddNodes = SNodes -- RS,
+    DelNodes = RunningNodes -- SNodes, %% i.e. running with no slave
+    (RS -- DelNodes) ++ AddNodes.
+
 %%----------------------------------------------------------------------------
 
 promote_slave([SPid | SPids]) ->
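
The recoverable_slaves bookkeeping above is plain list arithmetic over node names: add the nodes that now run slaves, drop running nodes that have none, and keep the previous order for the rest. For concreteness (an illustration, not from the patch) with made-up node names:

    %% Worked example of update_recoverable/2, evaluated as one shell command.
    SNodes       = [rabbit@b, rabbit@c],           %% nodes currently running slaves
    RS           = [rabbit@a, rabbit@b],           %% previously recoverable
    RunningNodes = [rabbit@a, rabbit@b, rabbit@c],
    AddNodes     = SNodes -- RS,                   %% [rabbit@c]
    DelNodes     = RunningNodes -- SNodes,         %% [rabbit@a]: running, no slave
    (RS -- DelNodes) ++ AddNodes.                  %% yields [rabbit@b,rabbit@c]
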
@@ -341,6 +370,13 @@ update_mirrors0(OldQ = #amqqueue{name = QName},
     {NewMNode, NewSNodes}    = suggested_queue_nodes(NewQ),
     OldNodes = [OldMNode | OldSNodes],
     NewNodes = [NewMNode | NewSNodes],
+    %% When a mirror dies, remove_from_queue/2 might have to add new
+    %% slaves (in "exactly" mode). It will check mnesia to see which
+    %% slaves there currently are. If drop_mirror/2 is invoked first
+    %% then when we end up in remove_from_queue/2 it will not see the
+    %% slaves that add_mirror/2 will add, and also want to add them
+    %% (even though we are not responding to the death of a
+    %% mirror). Breakage ensues.
     add_mirrors (QName, NewNodes -- OldNodes, async),
     drop_mirrors(QName, OldNodes -- NewNodes),
     %% This is for the case where no extra nodes were added but we changed to
@@ -374,16 +410,21 @@ validate_policy(KeyList) ->
     Mode = proplists:get_value(<<"ha-mode">>, KeyList, none),
     Params = proplists:get_value(<<"ha-params">>, KeyList, none),
     SyncMode = proplists:get_value(<<"ha-sync-mode">>, KeyList, none),
-    case {Mode, Params, SyncMode} of
-        {none, none, none} ->
+    PromoteOnShutdown = proplists:get_value(
+                          <<"ha-promote-on-shutdown">>, KeyList, none),
+    case {Mode, Params, SyncMode, PromoteOnShutdown} of
+        {none, none, none, none} ->
             ok;
-        {none, _, _} ->
-            {error, "ha-mode must be specified to specify ha-params or "
-             "ha-sync-mode", []};
+        {none, _, _, _} ->
+            {error, "ha-mode must be specified to specify ha-params, "
+             "ha-sync-mode or ha-promote-on-shutdown", []};
         _ ->
             case module(Mode) of
                 {ok, M} -> case M:validate_policy(Params) of
-                               ok -> validate_sync_mode(SyncMode);
+                               ok -> case validate_sync_mode(SyncMode) of
+                                         ok -> validate_pos(PromoteOnShutdown);
+                                         E  -> E
+                                     end;
                                E  -> E
                            end;
                 _       -> {error, "~p is not a valid ha-mode value", [Mode]}
@@ -398,3 +439,12 @@ validate_sync_mode(SyncMode) ->
         Mode            -> {error, "ha-sync-mode must be \"manual\" "
                             "or \"automatic\", got ~p", [Mode]}
     end.
+
+validate_pos(PromoteOnShutdown) ->
+    case PromoteOnShutdown of
+        <<"always">>      -> ok;
+        <<"when-synced">> -> ok;
+        none              -> ok;
+        Mode              -> {error, "ha-promote-on-shutdown must be "
+                              "\"always\" or \"when-synced\", got ~p", [Mode]}
+    end.
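
Illustration, not part of the patch: validate_policy/1 now also accepts ha-promote-on-shutdown ("always" or "when-synced") and keeps rejecting any ha-* sub-key when ha-mode itself is absent. A hedged shell sketch, assuming a running broker so the ha-mode module can be resolved; the key list mirrors a policy definition:

    ok = rabbit_mirror_queue_misc:validate_policy(
           [{<<"ha-mode">>,                <<"exactly">>},
            {<<"ha-params">>,              2},
            {<<"ha-sync-mode">>,           <<"automatic">>},
            {<<"ha-promote-on-shutdown">>, <<"when-synced">>}]),
    {error, _, []} = rabbit_mirror_queue_misc:validate_policy(
                       [{<<"ha-promote-on-shutdown">>, <<"always">>}]).
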
index 1724be660b641c72a672b96502671c5cf82a4420..5bb243746a09fe52c585e912e23d0c757fc5b887 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_mode).
index ab5fccc83daed59a5c2d610972001774c63ccb43..5742cd3963f985f0d0003afdd7dcc23dd89a0970 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_mode_all).
index bdbc4801208446ada8fc187c85962f27138f4296..0c0b7a10e8b03c3a097b26592b4551265e7d0533 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_mode_exactly).
index 1b32f3b39b21cf59a0f1900679e1c324bd666bb8..e63f3403739d6f0be6c0e3de33a816cb3aa4f15c 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_mode_nodes).
index 11d6a79c4ca2d3940aef8a9d4874214ba4159dd6..168e71170b816443f150c73a975a68fad4896ba4 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mirror_queue_slave).
 %% All instructions from the GM group must be processed in the order
 %% in which they're received.
 
--export([start_link/1, set_maximum_since_use/2, info/1, go/2]).
+-export([set_maximum_since_use/2, info/1, go/2]).
 
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
          code_change/3, handle_pre_hibernate/1, prioritise_call/4,
          prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
 
--export([joined/2, members_changed/3, handle_msg/3]).
+-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
 
 -behaviour(gen_server2).
 -behaviour(gm).
@@ -71,8 +71,6 @@
 
 %%----------------------------------------------------------------------------
 
-start_link(Q) -> gen_server2:start_link(?MODULE, Q, []).
-
 set_maximum_since_use(QPid, Age) ->
     gen_server2:cast(QPid, {set_maximum_since_use, Age}).
 
@@ -82,7 +80,7 @@ init(Q) ->
     ?store_proc_name(Q#amqqueue.name),
     {ok, {not_started, Q}, hibernate,
      {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN,
-      ?DESIRED_HIBERNATE}}.
+      ?DESIRED_HIBERNATE}, ?MODULE}.
 
 go(SPid, sync)  -> gen_server2:call(SPid, go, infinity);
 go(SPid, async) -> gen_server2:cast(SPid, go).
@@ -122,6 +120,7 @@ handle_go(Q = #amqqueue{name = QName}) ->
                    Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}),
             {ok, BQ} = application:get_env(backing_queue_module),
             Q1 = Q #amqqueue { pid = QPid },
+            ok = rabbit_queue_index:erase(QName), %% For crash recovery
             BQS = bq_init(BQ, Q1, new),
             State = #state { q                   = Q1,
                              gm                  = GM,
@@ -168,11 +167,11 @@ init_it(Self, GM, Node, QName) ->
             case [Pid || Pid <- [QPid | SPids], node(Pid) =:= Node] of
                 []     -> add_slave(Q, Self, GM),
                           {new, QPid, GMPids};
-                [QPid] -> case rabbit_misc:is_process_alive(QPid) of
+                [QPid] -> case rabbit_mnesia:is_process_alive(QPid) of
                               true  -> duplicate_live_master;
                               false -> {stale, QPid}
                           end;
-                [SPid] -> case rabbit_misc:is_process_alive(SPid) of
+                [SPid] -> case rabbit_mnesia:is_process_alive(SPid) of
                               true  -> existing;
                               false -> GMPids1 = [T || T = {_, S} <- GMPids,
                                                        S =/= SPid],
@@ -207,21 +206,28 @@ handle_call({gm_deaths, DeadGMPids}, From,
         {error, not_found} ->
             gen_server2:reply(From, ok),
             {stop, normal, State};
-        {ok, Pid, DeadPids} ->
+        {ok, Pid, DeadPids, ExtraNodes} ->
             rabbit_mirror_queue_misc:report_deaths(Self, false, QName,
                                                    DeadPids),
             case Pid of
                 MPid ->
                     %% master hasn't changed
                     gen_server2:reply(From, ok),
+                    rabbit_mirror_queue_misc:add_mirrors(
+                      QName, ExtraNodes, async),
                     noreply(State);
                 Self ->
                     %% we've become master
                     QueueState = promote_me(From, State),
+                    rabbit_mirror_queue_misc:add_mirrors(
+                      QName, ExtraNodes, async),
                     {become, rabbit_amqqueue_process, QueueState, hibernate};
                 _ ->
                     %% master has changed to not us
                     gen_server2:reply(From, ok),
+                    %% assertion, we don't need to add_mirrors/2 in this
+                    %% branch, see last clause in remove_from_queue/2
+                    [] = ExtraNodes,
                     %% Since GM is by nature lazy we need to make sure
                     %% there is some traffic when a master dies, to
                     %% make sure all slaves get informed of the
@@ -247,7 +253,7 @@ handle_cast({run_backing_queue, Mod, Fun}, State) ->
 handle_cast({gm, Instruction}, State) ->
     handle_process_result(process_instruction(Instruction, State));
 
-handle_cast({deliver, Delivery = #delivery{sender = Sender}, true, Flow},
+handle_cast({deliver, Delivery = #delivery{sender = Sender, flow = Flow}, true},
             State) ->
     %% Asynchronous, non-"mandatory", deliver mode.
     case Flow of
@@ -271,8 +277,8 @@ handle_cast({sync_start, Ref, Syncer},
            DD, Ref, TRef, Syncer, BQ, BQS,
            fun (BQN, BQSN) ->
                    BQSN1 = update_ram_duration(BQN, BQSN),
-                   TRefN = erlang:send_after(?RAM_DURATION_UPDATE_INTERVAL,
-                                             self(), update_ram_duration),
+                   TRefN = rabbit_misc:send_after(?RAM_DURATION_UPDATE_INTERVAL,
+                                                  self(), update_ram_duration),
                    {TRefN, BQSN1}
            end) of
         denied              -> noreply(State1);
@@ -318,6 +324,15 @@ handle_info({bump_credit, Msg}, State) ->
     credit_flow:handle_bump_msg(Msg),
     noreply(State);
 
+%% In the event of a short partition during sync we can detect the
+%% master's 'death', drop out of sync, and then receive sync messages
+%% which were still in flight. Ignore them.
+handle_info({sync_msg, _Ref, _Msg, _Props, _Unacked}, State) ->
+    noreply(State);
+
+handle_info({sync_complete, _Ref}, State) ->
+    noreply(State);
+
 handle_info(Msg, State) ->
     {stop, {unexpected_info, Msg}, State}.
 
@@ -339,10 +354,7 @@ terminate({shutdown, _} = R, State) ->
 terminate(Reason, State = #state{backing_queue       = BQ,
                                  backing_queue_state = BQS}) ->
     terminate_common(State),
-    BQ:delete_and_terminate(Reason, BQS);
-terminate([_SPid], _Reason) ->
-    %% gm case
-    ok.
+    BQ:delete_and_terminate(Reason, BQS).
 
 %% If the Reason is shutdown, or {shutdown, _}, it is not the queue
 %% being deleted: it's just the node going down. Even though we're a
@@ -416,6 +428,9 @@ members_changed([ SPid], _Births, Deaths) ->
         {promote, CPid} -> {become, rabbit_mirror_queue_coordinator, [CPid]}
     end.
 
+handle_msg([_SPid], _From, hibernate_heartbeat) ->
+    %% See rabbit_mirror_queue_coordinator:handle_pre_hibernate/1
+    ok;
 handle_msg([_SPid], _From, request_depth) ->
     %% This is only of value to the master
     ok;
@@ -439,6 +454,9 @@ handle_msg([SPid], _From, {sync_start, Ref, Syncer, SPids}) ->
 handle_msg([SPid], _From, Msg) ->
     ok = gen_server2:cast(SPid, {gm, Msg}).
 
+handle_terminate([_SPid], _Reason) ->
+    ok.
+
 %% ---------------------------------------------------------------------------
 %% Others
 %% ---------------------------------------------------------------------------
@@ -620,7 +638,7 @@ promote_me(From, #state { q                   = Q = #amqqueue { name = QName },
                         (_Msgid, _Status, MTC0) ->
                             MTC0
                     end, gb_trees:empty(), MS),
-    Deliveries = [Delivery#delivery{mandatory = false} || %% [0]
+    Deliveries = [promote_delivery(Delivery) ||
                    {_ChPid, {PubQ, _PendCh, _ChState}} <- dict:to_list(SQ),
                    Delivery <- queue:to_list(PubQ)],
     AwaitGmDown = [ChPid || {ChPid, {_, _, down_from_ch}} <- dict:to_list(SQ)],
@@ -632,8 +650,16 @@ promote_me(From, #state { q                   = Q = #amqqueue { name = QName },
       Q1, rabbit_mirror_queue_master, MasterState, RateTRef, Deliveries, KS1,
       MTC).
 
-%% [0] We reset mandatory to false here because we will have sent the
-%% mandatory_received already as soon as we got the message
+%% We reset mandatory to false here because we will have sent the
+%% mandatory_received already as soon as we got the message. We also
+%% need to send an ack for these messages since the channel is waiting
+%% for one for the via-GM case and we will not now receive one.
+promote_delivery(Delivery = #delivery{sender = Sender, flow = Flow}) ->
+    case Flow of
+        flow   -> credit_flow:ack(Sender);
+        noflow -> ok
+    end,
+    Delivery#delivery{mandatory = false}.
 
 noreply(State) ->
     {NewState, Timeout} = next_state(State),
@@ -653,8 +679,9 @@ next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) ->
         timed -> {ensure_sync_timer(State1), 0             }
     end.
 
-backing_queue_timeout(State = #state { backing_queue = BQ }) ->
-    run_backing_queue(BQ, fun (M, BQS) -> M:timeout(BQS) end, State).
+backing_queue_timeout(State = #state { backing_queue       = BQ,
+                                       backing_queue_state = BQS }) ->
+    State#state{backing_queue_state = BQ:timeout(BQS)}.
 
 ensure_sync_timer(State) ->
     rabbit_misc:ensure_timer(State, #state.sync_timer_ref,
@@ -814,24 +841,27 @@ publish_or_discard(Status, ChPid, MsgId,
     State1 #state { sender_queues = SQ1, msg_id_status = MS1 }.
 
 
-process_instruction({publish, ChPid, MsgProps,
+process_instruction({publish, ChPid, Flow, MsgProps,
                      Msg = #basic_message { id = MsgId }}, State) ->
+    maybe_flow_ack(ChPid, Flow),
     State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
         publish_or_discard(published, ChPid, MsgId, State),
-    BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, BQS),
+    BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, Flow, BQS),
     {ok, State1 #state { backing_queue_state = BQS1 }};
-process_instruction({publish_delivered, ChPid, MsgProps,
+process_instruction({publish_delivered, ChPid, Flow, MsgProps,
                      Msg = #basic_message { id = MsgId }}, State) ->
+    maybe_flow_ack(ChPid, Flow),
     State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
         publish_or_discard(published, ChPid, MsgId, State),
     true = BQ:is_empty(BQS),
-    {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, BQS),
+    {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
     {ok, maybe_store_ack(true, MsgId, AckTag,
                          State1 #state { backing_queue_state = BQS1 })};
-process_instruction({discard, ChPid, MsgId}, State) ->
+process_instruction({discard, ChPid, Flow, MsgId}, State) ->
+    maybe_flow_ack(ChPid, Flow),
     State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
         publish_or_discard(discarded, ChPid, MsgId, State),
-    BQS1 = BQ:discard(MsgId, ChPid, BQS),
+    BQS1 = BQ:discard(MsgId, ChPid, Flow, BQS),
     {ok, State1 #state { backing_queue_state = BQS1 }};
 process_instruction({drop, Length, Dropped, AckRequired},
                     State = #state { backing_queue       = BQ,
@@ -890,6 +920,9 @@ process_instruction({delete_and_terminate, Reason},
     BQ:delete_and_terminate(Reason, BQS),
     {stop, State #state { backing_queue_state = undefined }}.
 
+maybe_flow_ack(ChPid, flow)    -> credit_flow:ack(ChPid);
+maybe_flow_ack(_ChPid, noflow) -> ok.
+
 msg_ids_to_acktags(MsgIds, MA) ->
     {AckTags, MA1} =
         lists:foldl(
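
Note on the Flow changes above: the publish, publish_delivered and discard instructions now carry a Flow tag, and the mirror hands one unit of credit back to the sending channel per flow-controlled instruction (maybe_flow_ack/2, and credit_flow:ack/1 in promote_delivery/1). The following is a minimal illustrative sketch of that dispatch, not part of the commit; it assumes only credit_flow:ack/1 as used in the new clauses, and the instruction tuples mirror the new process_instruction/2 heads.

-module(flow_ack_sketch).
-export([handle_instruction/1]).

%% Return credit to the sending channel only for instructions tagged 'flow';
%% instructions tagged 'noflow' must not ack.
handle_instruction({publish, ChPid, Flow, _MsgProps, _Msg}) ->
    maybe_flow_ack(ChPid, Flow);
handle_instruction({publish_delivered, ChPid, Flow, _MsgProps, _Msg}) ->
    maybe_flow_ack(ChPid, Flow);
handle_instruction({discard, ChPid, Flow, _MsgId}) ->
    maybe_flow_ack(ChPid, Flow).

maybe_flow_ack(ChPid, flow)    -> credit_flow:ack(ChPid);
maybe_flow_ack(_ChPid, noflow) -> ok.
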
diff --git a/rabbitmq-server/src/rabbit_mirror_queue_slave_sup.erl b/rabbitmq-server/src/rabbit_mirror_queue_slave_sup.erl
deleted file mode 100644 (file)
index b631cc3..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_mirror_queue_slave_sup).
-
--behaviour(supervisor2).
-
--export([start_link/0, start_child/2]).
-
--export([init/1]).
-
--include_lib("rabbit.hrl").
-
--define(SERVER, ?MODULE).
-
-start_link() -> supervisor2:start_link({local, ?SERVER}, ?MODULE, []).
-
-start_child(Node, Args) -> supervisor2:start_child({?SERVER, Node}, Args).
-
-init([]) ->
-    {ok, {{simple_one_for_one, 10, 10},
-          [{rabbit_mirror_queue_slave,
-            {rabbit_mirror_queue_slave, start_link, []},
-            temporary, ?MAX_WAIT, worker, [rabbit_mirror_queue_slave]}]}}.
index e3fae4c09cc3361cf15a86dadedd3c8168125b93..9a8d55f94bdb0db41c4b09ce828d07b61eeeb5d3 100644 (file)
@@ -156,18 +156,29 @@ syncer(Ref, Log, MPid, SPids) ->
     %% We wait for a reply from the slaves so that we know they are in
     %% a receive block and will thus receive messages we send to them
     %% *without* those messages ending up in their gen_server2 pqueue.
-    case [SPid || SPid <- SPids,
-                  receive
-                      {sync_ready, Ref, SPid}       -> true;
-                      {sync_deny,  Ref, SPid}       -> false;
-                      {'DOWN', _, process, SPid, _} -> false
-                  end] of
+    case await_slaves(Ref, SPids) of
         []     -> Log("all slaves already synced", []);
         SPids1 -> MPid ! {ready, self()},
                   Log("mirrors ~p to sync", [[node(SPid) || SPid <- SPids1]]),
                   syncer_loop(Ref, MPid, SPids1)
     end.
 
+await_slaves(Ref, SPids) ->
+    [SPid || SPid <- SPids,
+             rabbit_mnesia:on_running_node(SPid) andalso %% [0]
+                 receive
+                     {sync_ready, Ref, SPid}       -> true;
+                     {sync_deny,  Ref, SPid}       -> false;
+                     {'DOWN', _, process, SPid, _} -> false
+                 end].
+%% [0] This check is in case there's been a partition which has then
+%% healed in between the master retrieving the slave pids from Mnesia
+%% and sending 'sync_start' over GM. If so there might be slaves on the
+%% other side of the partition which we can monitor (since they have
+%% rejoined the distributed system with us) but which did not get the
+%% 'sync_start' and so will not reply. We need to act as though they are
+%% down.
+
 syncer_loop(Ref, MPid, SPids) ->
     MPid ! {next, Ref},
     receive
@@ -241,6 +252,9 @@ slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
         {'$gen_cast', {set_ram_duration_target, Duration}} ->
             BQS1 = BQ:set_ram_duration_target(Duration, BQS),
             slave_sync_loop(Args, {MA, TRef, BQS1});
+        {'$gen_cast', {run_backing_queue, Mod, Fun}} ->
+            BQS1 = BQ:invoke(Mod, Fun, BQS),
+            slave_sync_loop(Args, {MA, TRef, BQS1});
         update_ram_duration ->
             {TRef1, BQS1} = UpdateRamDuration(BQ, BQS),
             slave_sync_loop(Args, {MA, TRef1, BQS1});
@@ -249,9 +263,10 @@ slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
             Props1 = Props#message_properties{needs_confirming = false},
             {MA1, BQS1} =
                 case Unacked of
-                    false -> {MA, BQ:publish(Msg, Props1, true, none, BQS)};
+                    false -> {MA,
+                              BQ:publish(Msg, Props1, true, none, noflow, BQS)};
                     true  -> {AckTag, BQS2} = BQ:publish_delivered(
-                                                Msg, Props1, none, BQS),
+                                                Msg, Props1, none, noflow, BQS),
                              {[{Msg#basic_message.id, AckTag} | MA], BQS2}
                 end,
             slave_sync_loop(Args, {MA1, TRef, BQS1});
diff --git a/rabbitmq-server/src/rabbit_misc.erl b/rabbitmq-server/src/rabbit_misc.erl
index 006bbadfb8237c97dfe6a276be6624aff9314eaf..ed5b38e815b4bfde040f2aa121dd14cac65c4d26 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_misc).
@@ -21,8 +21,8 @@
 -export([method_record_type/1, polite_pause/0, polite_pause/1]).
 -export([die/1, frame_error/2, amqp_error/4, quit/1,
          protocol_error/3, protocol_error/4, protocol_error/1]).
--export([not_found/1, absent/1]).
--export([type_class/1, assert_args_equivalence/4]).
+-export([not_found/1, absent/2]).
+-export([type_class/1, assert_args_equivalence/4, assert_field_equivalence/4]).
 -export([dirty_read/1]).
 -export([table_lookup/2, set_table_value/4]).
 -export([r/3, r/2, r_arg/4, rs/1]).
 -export([table_filter/3]).
 -export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]).
 -export([format/2, format_many/1, format_stderr/2]).
--export([with_local_io/1, local_info_msg/2]).
 -export([unfold/2, ceil/1, queue_fold/3]).
 -export([sort_field_table/1]).
--export([pid_to_string/1, string_to_pid/1]).
+-export([pid_to_string/1, string_to_pid/1,
+         pid_change_node/2, node_to_fake_pid/1]).
 -export([version_compare/2, version_compare/3]).
 -export([version_minor_equivalent/2]).
 -export([dict_cons/3, orddict_cons/3, gb_trees_cons/3]).
 -export([gb_trees_fold/3, gb_trees_foreach/2]).
--export([parse_arguments/3]).
 -export([all_module_attributes/1, build_acyclic_graph/3]).
 -export([now_ms/0]).
 -export([const/1]).
 -export([format_message_queue/2]).
 -export([append_rpc_all_nodes/4]).
 -export([os_cmd/1]).
+-export([is_os_process_alive/1]).
 -export([gb_sets_difference/2]).
--export([version/0, which_applications/0]).
+-export([version/0, otp_release/0, which_applications/0]).
 -export([sequence_error/1]).
 -export([json_encode/1, json_decode/1, json_to_term/1, term_to_json/1]).
 -export([check_expiry/1]).
 -export([base64url/1]).
 -export([interval_operation/4]).
--export([ensure_timer/4, stop_timer/2]).
+-export([ensure_timer/4, stop_timer/2, send_after/3, cancel_timer/1]).
 -export([get_parent/0]).
 -export([store_proc_name/1, store_proc_name/2]).
 -export([moving_average/4]).
+-export([now_to_ms/1]).
+-export([get_env/3]).
 
 %% Horrible macro to use in guards
 -define(IS_BENIGN_EXIT(R),
 
 -ifdef(use_specs).
 
--export_type([resource_name/0, thunk/1]).
+-export_type([resource_name/0, thunk/1, channel_or_connection_exit/0]).
 
 -type(ok_or_error() :: rabbit_types:ok_or_error(any())).
 -type(thunk(T) :: fun(() -> T)).
 -type(resource_name() :: binary()).
--type(optdef() :: flag | {option, string()}).
 -type(channel_or_connection_exit()
       :: rabbit_types:channel_exit() | rabbit_types:connection_exit()).
 -type(digraph_label() :: term()).
 -type(graph_vertex_fun() ::
-        fun ((atom(), [term()]) -> [{digraph:vertex(), digraph_label()}])).
+        fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph_label()}])).
 -type(graph_edge_fun() ::
-        fun ((atom(), [term()]) -> [{digraph:vertex(), digraph:vertex()}])).
+        fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph:vertex()}])).
+-type(tref() :: {'erlang', reference()} | {timer, timer:tref()}).
 
 -spec(method_record_type/1 :: (rabbit_framing:amqp_method_record())
                               -> rabbit_framing:amqp_method_name()).
 -spec(protocol_error/1 ::
         (rabbit_types:amqp_error()) -> channel_or_connection_exit()).
 -spec(not_found/1 :: (rabbit_types:r(atom())) -> rabbit_types:channel_exit()).
--spec(absent/1 :: (rabbit_types:amqqueue()) -> rabbit_types:channel_exit()).
+-spec(absent/2 :: (rabbit_types:amqqueue(), rabbit_amqqueue:absent_reason())
+                  -> rabbit_types:channel_exit()).
 -spec(type_class/1 :: (rabbit_framing:amqp_field_type()) -> atom()).
 -spec(assert_args_equivalence/4 :: (rabbit_framing:amqp_table(),
                                     rabbit_framing:amqp_table(),
                                     rabbit_types:r(any()), [binary()]) ->
                                         'ok' | rabbit_types:connection_exit()).
+-spec(assert_field_equivalence/4 ::
+        (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
+                                         'ok' | rabbit_types:connection_exit()).
+-spec(equivalence_fail/4 ::
+        (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
+                                 rabbit_types:connection_exit()).
 -spec(dirty_read/1 ::
         ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')).
 -spec(table_lookup/2 ::
 -spec(format/2 :: (string(), [any()]) -> string()).
 -spec(format_many/1 :: ([{string(), [any()]}]) -> string()).
 -spec(format_stderr/2 :: (string(), [any()]) -> 'ok').
--spec(with_local_io/1 :: (fun (() -> A)) -> A).
--spec(local_info_msg/2 :: (string(), [any()]) -> 'ok').
 -spec(unfold/2  :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}).
 -spec(ceil/1 :: (number()) -> integer()).
--spec(queue_fold/3 :: (fun ((any(), B) -> B), B, queue()) -> B).
+-spec(queue_fold/3 :: (fun ((any(), B) -> B), B, queue:queue()) -> B).
 -spec(sort_field_table/1 ::
         (rabbit_framing:amqp_table()) -> rabbit_framing:amqp_table()).
 -spec(pid_to_string/1 :: (pid()) -> string()).
 -spec(string_to_pid/1 :: (string()) -> pid()).
+-spec(pid_change_node/2 :: (pid(), node()) -> pid()).
+-spec(node_to_fake_pid/1 :: (atom()) -> pid()).
 -spec(version_compare/2 :: (string(), string()) -> 'lt' | 'eq' | 'gt').
 -spec(version_compare/3 ::
         (string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt'))
         -> boolean()).
 -spec(version_minor_equivalent/2 :: (string(), string()) -> boolean()).
--spec(dict_cons/3 :: (any(), any(), dict()) -> dict()).
+-spec(dict_cons/3 :: (any(), any(), dict:dict()) -> dict:dict()).
 -spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()).
--spec(gb_trees_cons/3 :: (any(), any(), gb_tree()) -> gb_tree()).
--spec(gb_trees_fold/3 :: (fun ((any(), any(), A) -> A), A, gb_tree()) -> A).
+-spec(gb_trees_cons/3 :: (any(), any(), gb_trees:tree()) -> gb_trees:tree()).
+-spec(gb_trees_fold/3 :: (fun ((any(), any(), A) -> A), A, gb_trees:tree())
+ -> A).
 -spec(gb_trees_foreach/2 ::
-        (fun ((any(), any()) -> any()), gb_tree()) -> 'ok').
--spec(parse_arguments/3 ::
-        ([{atom(), [{string(), optdef()}]} | atom()],
-         [{string(), optdef()}],
-         [string()])
-        -> {'ok', {atom(), [{string(), string()}], [string()]}} |
-           'no_command').
--spec(all_module_attributes/1 :: (atom()) -> [{atom(), [term()]}]).
+        (fun ((any(), any()) -> any()), gb_trees:tree()) -> 'ok').
+-spec(all_module_attributes/1 ::
+        (atom()) -> [{atom(), atom(), [term()]}]).
 -spec(build_acyclic_graph/3 ::
         (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}])
-        -> rabbit_types:ok_or_error2(digraph(),
+        -> rabbit_types:ok_or_error2(digraph:digraph(),
                                      {'vertex', 'duplicate', digraph:vertex()} |
                                      {'edge', ({bad_vertex, digraph:vertex()} |
                                                {bad_edge, [digraph:vertex()]}),
 -spec(format_message_queue/2 :: (any(), priority_queue:q()) -> term()).
 -spec(append_rpc_all_nodes/4 :: ([node()], atom(), atom(), [any()]) -> [any()]).
 -spec(os_cmd/1 :: (string()) -> string()).
--spec(gb_sets_difference/2 :: (gb_set(), gb_set()) -> gb_set()).
+-spec(is_os_process_alive/1 :: (non_neg_integer()) -> boolean()).
+-spec(gb_sets_difference/2 :: (gb_sets:set(), gb_sets:set()) -> gb_sets:set()).
 -spec(version/0 :: () -> string()).
+-spec(otp_release/0 :: () -> string()).
 -spec(which_applications/0 :: () -> [{atom(), string(), string()}]).
 -spec(sequence_error/1 :: ([({'error', any()} | any())])
                        -> {'error', any()} | any()).
         -> {any(), non_neg_integer()}).
 -spec(ensure_timer/4 :: (A, non_neg_integer(), non_neg_integer(), any()) -> A).
 -spec(stop_timer/2 :: (A, non_neg_integer()) -> A).
+-spec(send_after/3 :: (non_neg_integer(), pid(), any()) -> tref()).
+-spec(cancel_timer/1 :: (tref()) -> 'ok').
 -spec(get_parent/0 :: () -> pid()).
 -spec(store_proc_name/2 :: (atom(), rabbit_types:proc_name()) -> ok).
 -spec(store_proc_name/1 :: (rabbit_types:proc_type_and_name()) -> ok).
 -spec(moving_average/4 :: (float(), float(), float(), float() | 'undefined')
                           -> float()).
+-spec(now_to_ms/1 :: ({non_neg_integer(),
+                       non_neg_integer(),
+                       non_neg_integer()}) -> pos_integer()).
+-spec(get_env/3 :: (atom(), atom(), term())  -> term()).
 -endif.
 
 %%----------------------------------------------------------------------------
@@ -286,14 +299,18 @@ protocol_error(#amqp_error{} = Error) ->
 
 not_found(R) -> protocol_error(not_found, "no ~s", [rs(R)]).
 
-absent(#amqqueue{name = QueueName, pid = QPid, durable = true}) ->
+absent(#amqqueue{name = QueueName, pid = QPid, durable = true}, nodedown) ->
     %% The assertion of durability is mainly there because we mention
     %% durability in the error message. That way we will hopefully
     %% notice if at some future point our logic changes s.t. we get
     %% here with non-durable queues.
     protocol_error(not_found,
                    "home node '~s' of durable ~s is down or inaccessible",
-                   [node(QPid), rs(QueueName)]).
+                   [node(QPid), rs(QueueName)]);
+
+absent(#amqqueue{name = QueueName}, crashed) ->
+    protocol_error(not_found,
+                   "~s has crashed and failed to restart", [rs(QueueName)]).
 
 type_class(byte)      -> int;
 type_class(short)     -> int;
@@ -310,11 +327,6 @@ assert_args_equivalence(Orig, New, Name, Keys) ->
 
 assert_args_equivalence1(Orig, New, Name, Key) ->
     {Orig1, New1} = {table_lookup(Orig, Key), table_lookup(New, Key)},
-    FailureFun = fun () ->
-                     protocol_error(precondition_failed, "inequivalent arg '~s'"
-                                    "for ~s: received ~s but current is ~s",
-                                    [Key, rs(Name), val(New1), val(Orig1)])
-                 end,
     case {Orig1, New1} of
         {Same, Same} ->
             ok;
@@ -322,20 +334,35 @@ assert_args_equivalence1(Orig, New, Name, Key) ->
             case type_class(OrigType) == type_class(NewType) andalso
                  OrigVal == NewVal of
                  true  -> ok;
-                 false -> FailureFun()
+                 false -> assert_field_equivalence(OrigVal, NewVal, Name, Key)
             end;
-        {_, _} ->
-            FailureFun()
+        {OrigTypeVal, NewTypeVal} ->
+            assert_field_equivalence(OrigTypeVal, NewTypeVal, Name, Key)
     end.
 
+assert_field_equivalence(_Orig, _Orig, _Name, _Key) ->
+    ok;
+assert_field_equivalence(Orig, New, Name, Key) ->
+    equivalence_fail(Orig, New, Name, Key).
+
+equivalence_fail(Orig, New, Name, Key) ->
+    protocol_error(precondition_failed, "inequivalent arg '~s' "
+                   "for ~s: received ~s but current is ~s",
+                   [Key, rs(Name), val(New), val(Orig)]).
+
 val(undefined) ->
     "none";
 val({Type, Value}) ->
     ValFmt = case is_binary(Value) of
                  true  -> "~s";
-                 false -> "~w"
+                 false -> "~p"
              end,
-    format("the value '" ++ ValFmt ++ "' of type '~s'", [Value, Type]).
+    format("the value '" ++ ValFmt ++ "' of type '~s'", [Value, Type]);
+val(Value) ->
+    format(case is_binary(Value) of
+               true  -> "'~s'";
+               false -> "'~p'"
+           end, [Value]).
 
 %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
 %% expensive due to general mnesia overheads (figuring out table types
@@ -499,12 +526,16 @@ execute_mnesia_transaction(TxFun) ->
                                 Res = mnesia:sync_transaction(TxFun),
                                 DiskLogAfter  = mnesia_dumper:get_log_writes(),
                                 case DiskLogAfter == DiskLogBefore of
-                                    true  -> Res;
-                                    false -> {sync, Res}
+                                    true  -> file_handle_cache_stats:update(
+                                              mnesia_ram_tx),
+                                             Res;
+                                    false -> file_handle_cache_stats:update(
+                                              mnesia_disk_tx),
+                                             {sync, Res}
                                 end;
                        true  -> mnesia:sync_transaction(TxFun)
                    end
-           end) of
+           end, single) of
         {sync, {atomic,  Result}} -> mnesia_sync:sync(), Result;
         {sync, {aborted, Reason}} -> throw({error, Reason});
         {atomic,  Result}         -> Result;
@@ -635,23 +666,6 @@ format_stderr(Fmt, Args) ->
     end,
     ok.
 
-%% Execute Fun using the IO system of the local node (i.e. the node on
-%% which the code is executing).
-with_local_io(Fun) ->
-    GL = group_leader(),
-    group_leader(whereis(user), self()),
-    try
-        Fun()
-    after
-        group_leader(GL, self())
-    end.
-
-%% Log an info message on the local node using the standard logger.
-%% Use this if rabbit isn't running and the call didn't originate on
-%% the local node (e.g. rabbitmqctl calls).
-local_info_msg(Format, Args) ->
-    with_local_io(fun () -> error_logger:info_msg(Format, Args) end).
-
 unfold(Fun, Init) ->
     unfold(Fun, [], Init).
 
@@ -682,11 +696,7 @@ sort_field_table(Arguments) ->
 %% regardless of what node we are running on. The representation also
 %% permits easy identification of the pid's node.
 pid_to_string(Pid) when is_pid(Pid) ->
-    %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and
-    %% 8.7)
-    <<131,103,100,NodeLen:16,NodeBin:NodeLen/binary,Id:32,Ser:32,Cre:8>>
-        = term_to_binary(Pid),
-    Node = binary_to_term(<<131,100,NodeLen:16,NodeBin:NodeLen/binary>>),
+    {Node, Cre, Id, Ser} = decompose_pid(Pid),
     format("<~s.~B.~B.~B>", [Node, Cre, Id, Ser]).
 
 %% inverse of above
@@ -697,14 +707,33 @@ string_to_pid(Str) ->
     case re:run(Str, "^<(.*)\\.(\\d+)\\.(\\d+)\\.(\\d+)>\$",
                 [{capture,all_but_first,list}]) of
         {match, [NodeStr, CreStr, IdStr, SerStr]} ->
-            <<131,NodeEnc/binary>> = term_to_binary(list_to_atom(NodeStr)),
             [Cre, Id, Ser] = lists:map(fun list_to_integer/1,
                                        [CreStr, IdStr, SerStr]),
-            binary_to_term(<<131,103,NodeEnc/binary,Id:32,Ser:32,Cre:8>>);
+            compose_pid(list_to_atom(NodeStr), Cre, Id, Ser);
         nomatch ->
             throw(Err)
     end.
 
+pid_change_node(Pid, NewNode) ->
+    {_OldNode, Cre, Id, Ser} = decompose_pid(Pid),
+    compose_pid(NewNode, Cre, Id, Ser).
+
+%% node(node_to_fake_pid(Node)) =:= Node.
+node_to_fake_pid(Node) ->
+    compose_pid(Node, 0, 0, 0).
+
+decompose_pid(Pid) when is_pid(Pid) ->
+    %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and
+    %% 8.7)
+    <<131,103,100,NodeLen:16,NodeBin:NodeLen/binary,Id:32,Ser:32,Cre:8>>
+        = term_to_binary(Pid),
+    Node = binary_to_term(<<131,100,NodeLen:16,NodeBin:NodeLen/binary>>),
+    {Node, Cre, Id, Ser}.
+
+compose_pid(Node, Cre, Id, Ser) ->
+    <<131,NodeEnc/binary>> = term_to_binary(Node),
+    binary_to_term(<<131,103,NodeEnc/binary,Id:32,Ser:32,Cre:8>>).
+
 version_compare(A, B, lte) ->
     case version_compare(A, B) of
         eq -> true;
@@ -775,64 +804,6 @@ gb_trees_fold1(Fun, Acc, {Key, Val, It}) ->
 gb_trees_foreach(Fun, Tree) ->
     gb_trees_fold(fun (Key, Val, Acc) -> Fun(Key, Val), Acc end, ok, Tree).
 
-%% Takes:
-%%    * A list of [{atom(), [{string(), optdef()]} | atom()], where the atom()s
-%%      are the accepted commands and the optional [string()] is the list of
-%%      accepted options for that command
-%%    * A list [{string(), optdef()}] of options valid for all commands
-%%    * The list of arguments given by the user
-%%
-%% Returns either {ok, {atom(), [{string(), string()}], [string()]} which are
-%% respectively the command, the key-value pairs of the options and the leftover
-%% arguments; or no_command if no command could be parsed.
-parse_arguments(Commands, GlobalDefs, As) ->
-    lists:foldl(maybe_process_opts(GlobalDefs, As), no_command, Commands).
-
-maybe_process_opts(GDefs, As) ->
-    fun({C, Os}, no_command) ->
-            process_opts(atom_to_list(C), dict:from_list(GDefs ++ Os), As);
-       (C, no_command) ->
-            (maybe_process_opts(GDefs, As))({C, []}, no_command);
-       (_, {ok, Res}) ->
-            {ok, Res}
-    end.
-
-process_opts(C, Defs, As0) ->
-    KVs0 = dict:map(fun (_, flag)        -> false;
-                        (_, {option, V}) -> V
-                    end, Defs),
-    process_opts(Defs, C, As0, not_found, KVs0, []).
-
-%% Consume flags/options until you find the correct command. If there are no
-%% arguments or the first argument is not the command we're expecting, fail.
-%% Arguments to this are: definitions, cmd we're looking for, args we
-%% haven't parsed, whether we have found the cmd, options we've found,
-%% plain args we've found.
-process_opts(_Defs, C, [], found, KVs, Outs) ->
-    {ok, {list_to_atom(C), dict:to_list(KVs), lists:reverse(Outs)}};
-process_opts(_Defs, _C, [], not_found, _, _) ->
-    no_command;
-process_opts(Defs, C, [A | As], Found, KVs, Outs) ->
-    OptType = case dict:find(A, Defs) of
-                  error             -> none;
-                  {ok, flag}        -> flag;
-                  {ok, {option, _}} -> option
-              end,
-    case {OptType, C, Found} of
-        {flag, _, _}     -> process_opts(
-                              Defs, C, As, Found, dict:store(A, true, KVs),
-                              Outs);
-        {option, _, _}   -> case As of
-                                []        -> no_command;
-                                [V | As1] -> process_opts(
-                                               Defs, C, As1, Found,
-                                               dict:store(A, V, KVs), Outs)
-                            end;
-        {none, A, _}     -> process_opts(Defs, C, As, found, KVs, Outs);
-        {none, _, found} -> process_opts(Defs, C, As, found, KVs, [A | Outs]);
-        {none, _, _}     -> no_command
-    end.
-
 now_ms() ->
     timer:now_diff(now(), {0,0,0}) div 1000.
 
@@ -849,20 +820,20 @@ module_attributes(Module) ->
     end.
 
 all_module_attributes(Name) ->
-    Modules =
+    Targets =
         lists:usort(
           lists:append(
-            [Modules || {App, _, _}   <- application:loaded_applications(),
-                        {ok, Modules} <- [application:get_key(App, modules)]])),
+            [[{App, Module} || Module <- Modules] ||
+                {App, _, _}   <- application:loaded_applications(),
+                {ok, Modules} <- [application:get_key(App, modules)]])),
     lists:foldl(
-      fun (Module, Acc) ->
+      fun ({App, Module}, Acc) ->
               case lists:append([Atts || {N, Atts} <- module_attributes(Module),
                                          N =:= Name]) of
                   []   -> Acc;
-                  Atts -> [{Module, Atts} | Acc]
+                  Atts -> [{App, Module, Atts} | Acc]
               end
-      end, [], Modules).
-
+      end, [], Targets).
 
 build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
     G = digraph:new([acyclic]),
@@ -870,13 +841,13 @@ build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
         [case digraph:vertex(G, Vertex) of
              false -> digraph:add_vertex(G, Vertex, Label);
              _     -> ok = throw({graph_error, {vertex, duplicate, Vertex}})
-         end || {Module, Atts}  <- Graph,
-                {Vertex, Label} <- VertexFun(Module, Atts)],
+         end || GraphElem       <- Graph,
+                {Vertex, Label} <- VertexFun(GraphElem)],
         [case digraph:add_edge(G, From, To) of
              {error, E} -> throw({graph_error, {edge, E, From, To}});
              _          -> ok
-         end || {Module, Atts} <- Graph,
-                {From, To}     <- EdgeFun(Module, Atts)],
+         end || GraphElem  <- Graph,
+                {From, To} <- EdgeFun(GraphElem)],
         {ok, G}
     catch {graph_error, Reason} ->
             true = digraph:delete(G),
@@ -902,10 +873,14 @@ ntoab(IP) ->
 %% We try to avoid reconnecting to down nodes here; this is used in a
 %% loop in rabbit_amqqueue:on_node_down/1 and any delays we incur
 %% would be bad news.
+%%
+%% See also rabbit_mnesia:is_process_alive/1 which also requires the
+%% process be in the same running cluster as us (i.e. not partitioned
+%% or some random node).
 is_process_alive(Pid) ->
     Node = node(Pid),
     lists:member(Node, [node() | nodes()]) andalso
-       rpc:call(Node, erlang, is_process_alive, [Pid]) =:= true.
+        rpc:call(Node, erlang, is_process_alive, [Pid]) =:= true.
 
 pget(K, P) -> proplists:get_value(K, P).
 pget(K, P, D) -> proplists:get_value(K, P, D).
@@ -961,6 +936,38 @@ os_cmd(Command) ->
             end
     end.
 
+is_os_process_alive(Pid) ->
+    with_os([{unix, fun () ->
+                            run_ps(Pid) =:= 0
+                    end},
+             {win32, fun () ->
+                             Cmd = "tasklist /nh /fi \"pid eq " ++ Pid ++ "\" ",
+                             Res = os_cmd(Cmd ++ "2>&1"),
+                             case re:run(Res, "erl\\.exe", [{capture, none}]) of
+                                 match -> true;
+                                 _     -> false
+                             end
+                     end}]).
+
+with_os(Handlers) ->
+    {OsFamily, _} = os:type(),
+    case proplists:get_value(OsFamily, Handlers) of
+        undefined -> throw({unsupported_os, OsFamily});
+        Handler   -> Handler()
+    end.
+
+run_ps(Pid) ->
+    Port = erlang:open_port({spawn, "ps -p " ++ Pid},
+                            [exit_status, {line, 16384},
+                             use_stdio, stderr_to_stdout]),
+    exit_loop(Port).
+
+exit_loop(Port) ->
+    receive
+        {Port, {exit_status, Rc}} -> Rc;
+        {Port, _}                 -> exit_loop(Port)
+    end.
+
 gb_sets_difference(S1, S2) ->
     gb_sets:fold(fun gb_sets:delete_any/2, S1, S2).
 
@@ -968,6 +975,20 @@ version() ->
     {ok, VSN} = application:get_key(rabbit, vsn),
     VSN.
 
+%% See http://www.erlang.org/doc/system_principles/versions.html
+otp_release() ->
+    File = filename:join([code:root_dir(), "releases",
+                          erlang:system_info(otp_release), "OTP_VERSION"]),
+    case file:read_file(File) of
+        {ok, VerBin} ->
+            %% 17.0 or later, we need the file for the minor version
+            string:strip(binary_to_list(VerBin), both, $\n);
+        {error, _} ->
+            %% R16B03 or earlier (no file, otp_release is correct)
+            %% or we couldn't read the file (so this is the best we can do)
+            erlang:system_info(otp_release)
+    end.
+
 %% application:which_applications(infinity) is dangerous, since it can
 %% cause deadlocks on shutdown. So we have to use a timeout variant,
 %% but w/o creating spurious timeout errors.
@@ -1017,7 +1038,9 @@ term_to_json(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
                      V =:= true orelse V =:= false ->
     V.
 
-check_expiry(N) when N > ?MAX_EXPIRY_TIMER -> {error, {value_too_big, N}};
+now_to_ms({Mega, Sec, Micro}) ->
+    (Mega * 1000000 * 1000000 + Sec * 1000000 + Micro) div 1000.
+
 check_expiry(N) when N < 0                 -> {error, {value_negative, N}};
 check_expiry(_N)                           -> ok.
 
@@ -1045,7 +1068,7 @@ interval_operation({M, F, A}, MaxRatio, IdealInterval, LastInterval) ->
 
 ensure_timer(State, Idx, After, Msg) ->
     case element(Idx, State) of
-        undefined -> TRef = erlang:send_after(After, self(), Msg),
+        undefined -> TRef = send_after(After, self(), Msg),
                      setelement(Idx, State, TRef);
         _         -> State
     end.
@@ -1053,15 +1076,35 @@ ensure_timer(State, Idx, After, Msg) ->
 stop_timer(State, Idx) ->
     case element(Idx, State) of
         undefined -> State;
-        TRef      -> case erlang:cancel_timer(TRef) of
-                         false -> State;
-                         _     -> setelement(Idx, State, undefined)
-                     end
+        TRef      -> cancel_timer(TRef),
+                     setelement(Idx, State, undefined)
     end.
 
+%% timer:send_after/3 goes through a single timer process but allows
+%% long delays. erlang:send_after/3 does not have a bottleneck but
+%% only allows max 2^32-1 millis.
+-define(MAX_ERLANG_SEND_AFTER, 4294967295).
+send_after(Millis, Pid, Msg) when Millis > ?MAX_ERLANG_SEND_AFTER ->
+    {ok, Ref} = timer:send_after(Millis, Pid, Msg),
+    {timer, Ref};
+send_after(Millis, Pid, Msg) ->
+    {erlang, erlang:send_after(Millis, Pid, Msg)}.
+
+cancel_timer({erlang, Ref}) -> erlang:cancel_timer(Ref),
+                               ok;
+cancel_timer({timer, Ref})  -> {ok, cancel} = timer:cancel(Ref),
+                               ok.
+
 store_proc_name(Type, ProcName) -> store_proc_name({Type, ProcName}).
 store_proc_name(TypeProcName)   -> put(process_name, TypeProcName).
 
+%% application:get_env/3 is only available in R16B01 or later.
+get_env(Application, Key, Def) ->
+    case application:get_env(Application, Key) of
+        {ok, Val} -> Val;
+        undefined -> Def
+    end.
+
 moving_average(_Time, _HalfLife, Next, undefined) ->
     Next;
 %% We want the Weight to decrease as Time goes up (since Weight is the
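
Note on the timer changes in rabbit_misc above: send_after/3 and cancel_timer/1 wrap the choice between erlang:send_after/3 (no bottleneck, but capped at 2^32-1 ms) and timer:send_after/3 (single timer process, but arbitrary delays), returning a tagged tref() so either kind can be cancelled uniformly. A minimal usage sketch, illustrative only and assuming the rabbit_misc module from this commit is on the code path:

-module(timer_wrapper_demo).
-export([demo/0]).

demo() ->
    %% Short delay: dispatched to erlang:send_after/3, returned as {erlang, Ref}.
    Short = rabbit_misc:send_after(5000, self(), ping),
    %% Sixty days exceeds 2^32-1 ms, so this goes through timer:send_after/3
    %% and is returned as {timer, TRef}.
    Long = rabbit_misc:send_after(60 * 24 * 60 * 60 * 1000, self(), expire),
    %% Both flavours are cancelled through the same function.
    ok = rabbit_misc:cancel_timer(Long),
    ok = rabbit_misc:cancel_timer(Short),
    ok.
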
diff --git a/rabbitmq-server/src/rabbit_mnesia.erl b/rabbitmq-server/src/rabbit_mnesia.erl
index c6c2c8eb1ebf4b0248912273b9d5b94d8068d3c9..d480bef7caf6ee7ce8155a3f8568bfc9e4fcc641 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_mnesia).
          update_cluster_nodes/1,
          change_cluster_node_type/1,
          forget_cluster_node/2,
+         force_load_next_boot/0,
 
          status/0,
          is_clustered/0,
+         on_running_node/1,
+         is_process_alive/1,
          cluster_nodes/1,
          node_type/0,
          dir/0,
 -spec(update_cluster_nodes/1 :: (node()) -> 'ok').
 -spec(change_cluster_node_type/1 :: (node_type()) -> 'ok').
 -spec(forget_cluster_node/2 :: (node(), boolean()) -> 'ok').
+-spec(force_load_next_boot/0 :: () -> 'ok').
 
 %% Various queries to get the status of the db
 -spec(status/0 :: () -> [{'nodes', [{node_type(), [node()]}]} |
                          {'running_nodes', [node()]} |
                          {'partitions', [{node(), [node()]}]}]).
 -spec(is_clustered/0 :: () -> boolean()).
+-spec(on_running_node/1 :: (pid()) -> boolean()).
+-spec(is_process_alive/1 :: (pid()) -> boolean()).
 -spec(cluster_nodes/1 :: ('all' | 'disc' | 'ram' | 'running') -> [node()]).
 -spec(node_type/0 :: () -> node_type()).
 -spec(dir/0 :: () -> file:filename()).
@@ -103,39 +109,50 @@ init() ->
     %% We intuitively expect the global name server to be synced when
     %% Mnesia is up. In fact that's not guaranteed to be the case -
     %% let's make it so.
-    ok = global:sync(),
+    ok = rabbit_node_monitor:global_sync(),
     ok.
 
 init_from_config() ->
+    FindBadNodeNames = fun
+        (Name, BadNames) when is_atom(Name) -> BadNames;
+        (Name, BadNames)                    -> [Name | BadNames]
+    end,
     {TryNodes, NodeType} =
         case application:get_env(rabbit, cluster_nodes) of
+            {ok, {Nodes, Type} = Config}
+            when is_list(Nodes) andalso (Type == disc orelse Type == ram) ->
+                case lists:foldr(FindBadNodeNames, [], Nodes) of
+                    []       -> Config;
+                    BadNames -> e({invalid_cluster_node_names, BadNames})
+                end;
+            {ok, {_, BadType}} when BadType /= disc andalso BadType /= ram ->
+                e({invalid_cluster_node_type, BadType});
             {ok, Nodes} when is_list(Nodes) ->
-                Config = {Nodes -- [node()], case lists:member(node(), Nodes) of
-                                                 true  -> disc;
-                                                 false -> ram
-                                             end},
-                error_logger:warning_msg(
-                  "Converting legacy 'cluster_nodes' configuration~n    ~w~n"
-                  "to~n    ~w.~n~n"
-                  "Please update the configuration to the new format "
-                  "{Nodes, NodeType}, where Nodes contains the nodes that the "
-                  "node will try to cluster with, and NodeType is either "
-                  "'disc' or 'ram'~n", [Nodes, Config]),
-                Config;
-            {ok, Config} ->
-                Config
+                %% The legacy syntax (a nodes list without the node
+                %% type) is unsupported.
+                case lists:foldr(FindBadNodeNames, [], Nodes) of
+                    [] -> e(cluster_node_type_mandatory);
+                    _  -> e(invalid_cluster_nodes_conf)
+                end;
+            {ok, _} ->
+                e(invalid_cluster_nodes_conf)
         end,
-    case find_good_node(nodes_excl_me(TryNodes)) of
+    case TryNodes of
+        [] -> init_db_and_upgrade([node()], disc, false);
+        _  -> auto_cluster(TryNodes, NodeType)
+    end.
+
+auto_cluster(TryNodes, NodeType) ->
+    case find_auto_cluster_node(nodes_excl_me(TryNodes)) of
         {ok, Node} ->
-            rabbit_log:info("Node '~p' selected for clustering from "
-                            "configuration~n", [Node]),
+            rabbit_log:info("Node '~p' selected for auto-clustering~n", [Node]),
             {ok, {_, DiscNodes, _}} = discover_cluster0(Node),
             init_db_and_upgrade(DiscNodes, NodeType, true),
             rabbit_node_monitor:notify_joined_cluster();
         none ->
-            rabbit_log:warning("Could not find any suitable node amongst the "
-                               "ones provided in the configuration: ~p~n",
-                               [TryNodes]),
+            rabbit_log:warning(
+              "Could not find any node for auto-clustering from: ~p~n"
+              "Starting blank node...~n", [TryNodes]),
             init_db_and_upgrade([node()], disc, false)
     end.
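
Note on the stricter cluster_nodes validation above: the legacy form (a bare list of node names) is now rejected at boot, and the key must be {Nodes, Type} with Type being disc or ram (see the new error_description/1 clauses later in this file's diff). An illustrative rabbitmq.config fragment, with hypothetical node names:

[
 {rabbit, [
   %% Nodes this node will try to auto-cluster with, plus its node type.
   %% A bare list such as ['rabbit@node1', 'rabbit@node2'] now fails with
   %% 'cluster_node_type_mandatory'.
   {cluster_nodes, {['rabbit@node1', 'rabbit@node2'], disc}}
 ]}
].
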
 
@@ -163,21 +180,27 @@ join_cluster(DiscoveryNode, NodeType) ->
     {ClusterNodes, _, _} = discover_cluster([DiscoveryNode]),
     case me_in_nodes(ClusterNodes) of
         false ->
-            %% reset the node. this simplifies things and it will be needed in
-            %% this case - we're joining a new cluster with new nodes which
-            %% are not in synch with the current node. I also lifts the burden
-            %% of reseting the node from the user.
-            reset_gracefully(),
-
-            %% Join the cluster
-            rabbit_misc:local_info_msg("Clustering with ~p as ~p node~n",
-                                       [ClusterNodes, NodeType]),
-            ok = init_db_with_mnesia(ClusterNodes, NodeType, true, true),
-            rabbit_node_monitor:notify_joined_cluster(),
-            ok;
+            case check_cluster_consistency(DiscoveryNode, false) of
+                {ok, _} ->
+                    %% reset the node. this simplifies things and it
+                    %% will be needed in this case - we're joining a new
+                    %% cluster with new nodes which are not in synch
+                    %% with the current node. It also lifts the burden
+                    %% of resetting the node from the user.
+                    reset_gracefully(),
+
+                    %% Join the cluster
+                    rabbit_log:info("Clustering with ~p as ~p node~n",
+                                    [ClusterNodes, NodeType]),
+                    ok = init_db_with_mnesia(ClusterNodes, NodeType,
+                                             true, true),
+                    rabbit_node_monitor:notify_joined_cluster(),
+                    ok;
+                {error, Reason} ->
+                    {error, Reason}
+            end;
         true ->
-            rabbit_misc:local_info_msg("Already member of cluster: ~p~n",
-                                       [ClusterNodes]),
+            rabbit_log:info("Already member of cluster: ~p~n", [ClusterNodes]),
             {ok, already_member}
     end.
 
@@ -186,12 +209,12 @@ join_cluster(DiscoveryNode, NodeType) ->
 %% persisted messages
 reset() ->
     ensure_mnesia_not_running(),
-    rabbit_misc:local_info_msg("Resetting Rabbit~n", []),
+    rabbit_log:info("Resetting Rabbit~n", []),
     reset_gracefully().
 
 force_reset() ->
     ensure_mnesia_not_running(),
-    rabbit_misc:local_info_msg("Resetting Rabbit forcefully~n", []),
+    rabbit_log:info("Resetting Rabbit forcefully~n", []),
     wipe().
 
 reset_gracefully() ->
@@ -247,8 +270,8 @@ update_cluster_nodes(DiscoveryNode) ->
             %% nodes
             mnesia:delete_schema([node()]),
             rabbit_node_monitor:write_cluster_status(Status),
-            rabbit_misc:local_info_msg("Updating cluster nodes from ~p~n",
-                                       [DiscoveryNode]),
+            rabbit_log:info("Updating cluster nodes from ~p~n",
+                            [DiscoveryNode]),
             init_db_with_mnesia(AllNodes, node_type(), true, true);
         false ->
             e(inconsistent_cluster)
@@ -271,7 +294,7 @@ forget_cluster_node(Node, RemoveWhenOffline) ->
         {true,  false} -> remove_node_offline_node(Node);
         {true,   true} -> e(online_node_offline_flag);
         {false, false} -> e(offline_node_no_offline_flag);
-        {false,  true} -> rabbit_misc:local_info_msg(
+        {false,  true} -> rabbit_log:info(
                             "Removing node ~p from cluster~n", [Node]),
                           case remove_node_if_mnesia_running(Node) of
                               ok               -> ok;
@@ -303,7 +326,6 @@ remove_node_offline_node(Node) ->
             e(removing_node_from_offline_node)
     end.
 
-
 %%----------------------------------------------------------------------------
 %% Queries
 %%----------------------------------------------------------------------------
@@ -331,6 +353,16 @@ is_running() -> mnesia:system_info(is_running) =:= yes.
 is_clustered() -> AllNodes = cluster_nodes(all),
                   AllNodes =/= [] andalso AllNodes =/= [node()].
 
+on_running_node(Pid) -> lists:member(node(Pid), cluster_nodes(running)).
+
+%% This requires the process be in the same running cluster as us
+%% (i.e. not partitioned or some random node).
+%%
+%% See also rabbit_misc:is_process_alive/1 which does not.
+is_process_alive(Pid) ->
+    on_running_node(Pid) andalso
+        rpc:call(node(Pid), erlang, is_process_alive, [Pid]) =:= true.
+
 cluster_nodes(WhichNodes) -> cluster_status(WhichNodes).
 
 %% This function is the actual source of information, since it gets
@@ -386,7 +418,7 @@ cluster_status(WhichNodes) ->
     end.
 
 node_info() ->
-    {erlang:system_info(otp_release), rabbit_misc:version(),
+    {rabbit_misc:otp_release(), rabbit_misc:version(),
      cluster_status_from_mnesia()}.
 
 node_type() ->
@@ -526,7 +558,7 @@ maybe_force_load() ->
 check_cluster_consistency() ->
     %% We want to find 0 or 1 consistent nodes.
     case lists:foldl(
-           fun (Node,  {error, _})    -> check_cluster_consistency(Node);
+           fun (Node,  {error, _})    -> check_cluster_consistency(Node, true);
                (_Node, {ok, Status})  -> {ok, Status}
            end, {error, not_found}, nodes_excl_me(cluster_nodes(all)))
     of
@@ -556,17 +588,22 @@ check_cluster_consistency() ->
             throw(E)
     end.
 
-check_cluster_consistency(Node) ->
+check_cluster_consistency(Node, CheckNodesConsistency) ->
     case rpc:call(Node, rabbit_mnesia, node_info, []) of
         {badrpc, _Reason} ->
             {error, not_found};
         {_OTP, _Rabbit, {error, _}} ->
             {error, not_found};
-        {OTP, Rabbit, {ok, Status}} ->
+        {OTP, Rabbit, {ok, Status}} when CheckNodesConsistency ->
             case check_consistency(OTP, Rabbit, Node, Status) of
                 {error, _} = E -> E;
                 {ok, Res}      -> {ok, Res}
             end;
+        {OTP, Rabbit, {ok, Status}} ->
+            case check_consistency(OTP, Rabbit) of
+                {error, _} = E -> E;
+                ok             -> {ok, Status}
+            end;
         {_OTP, Rabbit, _Hash, _Status} ->
             %% delegate hash checking implies version mismatch
             version_error("Rabbit", rabbit_misc:version(), Rabbit)
@@ -618,10 +655,10 @@ schema_ok_or_move() ->
         {error, Reason} ->
             %% NB: we cannot use rabbit_log here since it may not have been
             %% started yet
-            error_logger:warning_msg("schema integrity check failed: ~p~n"
-                                     "moving database to backup location "
-                                     "and recreating schema from scratch~n",
-                                     [Reason]),
+            rabbit_log:warning("schema integrity check failed: ~p~n"
+                               "moving database to backup location "
+                               "and recreating schema from scratch~n",
+                               [Reason]),
             ok = move_db(),
             ok = create_schema()
     end.
@@ -647,8 +684,8 @@ move_db() ->
         ok ->
             %% NB: we cannot use rabbit_log here since it may not have
             %% been started yet
-            error_logger:warning_msg("moved database from ~s to ~s~n",
-                                     [MnesiaDir, BackupDir]),
+            rabbit_log:warning("moved database from ~s to ~s~n",
+                               [MnesiaDir, BackupDir]),
             ok;
         {error, Reason} -> throw({error, {cannot_backup_mnesia,
                                           MnesiaDir, BackupDir, Reason}})
@@ -694,7 +731,7 @@ leave_cluster(Node) ->
     end.
 
 wait_for(Condition) ->
-    error_logger:info_msg("Waiting for ~p...~n", [Condition]),
+    rabbit_log:info("Waiting for ~p...~n", [Condition]),
     timer:sleep(1000).
 
 start_mnesia(CheckConsistency) ->
@@ -759,7 +796,7 @@ version_error(Name, This, Remote) ->
                                 "remote node ~s", [Name, This, Remote])}}.
 
 check_otp_consistency(Remote) ->
-    check_version_consistency(erlang:system_info(otp_release), Remote, "OTP").
+    check_version_consistency(rabbit_misc:otp_release(), Remote, "OTP").
 
 check_rabbit_consistency(Remote) ->
     check_version_consistency(
@@ -787,17 +824,24 @@ is_virgin_node() ->
             false
     end.
 
-find_good_node([]) ->
+find_auto_cluster_node([]) ->
     none;
-find_good_node([Node | Nodes]) ->
+find_auto_cluster_node([Node | Nodes]) ->
+    Fail = fun (Fmt, Args) ->
+                   rabbit_log:warning(
+                     "Could not auto-cluster with ~s: " ++ Fmt, [Node | Args]),
+                   find_auto_cluster_node(Nodes)
+           end,
     case rpc:call(Node, rabbit_mnesia, node_info, []) of
-        {badrpc, _Reason}         -> find_good_node(Nodes);
+        {badrpc, _} = Reason         -> Fail("~p~n", [Reason]);
         %% old delegate hash check
-        {_OTP, _Rabbit, _Hash, _} -> find_good_node(Nodes);
-        {OTP, Rabbit, _}          -> case check_consistency(OTP, Rabbit) of
-                                         {error, _} -> find_good_node(Nodes);
-                                         ok         -> {ok, Node}
-                                     end
+        {_OTP, RMQ, _Hash, _}        -> Fail("version ~s~n", [RMQ]);
+        {_OTP, _RMQ, {error, _} = E} -> Fail("~p~n", [E]);
+        {OTP, RMQ, _}                -> case check_consistency(OTP, RMQ) of
+                                            {error, _} -> Fail("versions ~p~n",
+                                                               [{OTP, RMQ}]);
+                                            ok         -> {ok, Node}
+                                        end
     end.
 
 is_only_clustered_disc_node() ->
@@ -812,6 +856,20 @@ nodes_excl_me(Nodes) -> Nodes -- [node()].
 
 e(Tag) -> throw({error, {Tag, error_description(Tag)}}).
 
+error_description({invalid_cluster_node_names, BadNames}) ->
+    "In the 'cluster_nodes' configuration key, the following node names "
+        "are invalid: " ++ lists:flatten(io_lib:format("~p", [BadNames]));
+error_description({invalid_cluster_node_type, BadType}) ->
+    "In the 'cluster_nodes' configuration key, the node type is invalid "
+        "(expected 'disc' or 'ram'): " ++
+        lists:flatten(io_lib:format("~p", [BadType]));
+error_description(cluster_node_type_mandatory) ->
+    "The 'cluster_nodes' configuration key must indicate the node type: "
+        "either {[...], disc} or {[...], ram}";
+error_description(invalid_cluster_nodes_conf) ->
+    "The 'cluster_nodes' configuration key is invalid, it must be of the "
+        "form {[Nodes], Type}, where Nodes is a list of node names and "
+        "Type is either 'disc' or 'ram'";
 error_description(clustering_only_disc_node) ->
     "You cannot cluster a node if it is the only disc node in its existing "
         " cluster. If new nodes joined while this node was offline, use "
diff --git a/rabbitmq-server/src/rabbit_mnesia_rename.erl b/rabbitmq-server/src/rabbit_mnesia_rename.erl
new file mode 100644 (file)
index 0000000..2604216
--- /dev/null
@@ -0,0 +1,267 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mnesia_rename).
+-include("rabbit.hrl").
+
+-export([rename/2]).
+-export([maybe_finish/1]).
+
+-define(CONVERT_TABLES, [schema, rabbit_durable_queue]).
+
+%% Supports renaming the nodes in the Mnesia database. In order to do
+%% this, we take a backup of the database, traverse the backup
+%% changing node names and pids as we go, then restore it.
+%%
+%% That's enough for a standalone node; for clusters the story is more
+%% complex. We can take pairs of nodes From and To, but backing up and
+%% restoring the database changes schema cookies, so if we just do
+%% this on all nodes the cluster will refuse to re-form with
+%% "Incompatible schema cookies.". Therefore we do something similar
+%% to what we do for upgrades - the first node in the cluster to
+%% restart becomes the authority, and other nodes wipe their own
+%% Mnesia state and rejoin. They also need to tell Mnesia the old node
+%% is not coming back.
+%%
+%% If we are renaming nodes one at a time then the running cluster
+%% might not be aware that a rename has taken place, so after we wipe
+%% and rejoin we then update any tables (in practice just
+%% rabbit_durable_queue) which should be aware that we have changed.
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-spec(rename/2 :: (node(), [{node(), node()}]) -> 'ok').
+-spec(maybe_finish/1 :: ([node()]) -> 'ok').
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+rename(Node, NodeMapList) ->
+    try
+        %% Check everything is correct and figure out what we are
+        %% changing from and to.
+        {FromNode, ToNode, NodeMap} = prepare(Node, NodeMapList),
+
+        %% We backup and restore Mnesia even if other nodes are
+        %% running at the time, and defer the final decision about
+        %% whether to use our mutated copy or rejoin the cluster until
+        %% we restart. That means we might be mutating our copy of the
+        %% database while the cluster is running. *Do not* contact the
+        %% cluster while this is happening, we are likely to get
+        %% confused.
+        application:set_env(kernel, dist_auto_connect, never),
+
+        %% Take a copy we can restore from if we abandon the
+        %% rename. We don't restore from the "backup" since restoring
+        %% that changes schema cookies and might stop us rejoining the
+        %% cluster.
+        ok = rabbit_mnesia:copy_db(mnesia_copy_dir()),
+
+        %% And make the actual changes
+        rabbit_control_main:become(FromNode),
+        take_backup(before_backup_name()),
+        convert_backup(NodeMap, before_backup_name(), after_backup_name()),
+        ok = rabbit_file:write_term_file(rename_config_name(),
+                                         [{FromNode, ToNode}]),
+        convert_config_files(NodeMap),
+        rabbit_control_main:become(ToNode),
+        restore_backup(after_backup_name()),
+        ok
+    after
+        stop_mnesia()
+    end.
+
+prepare(Node, NodeMapList) ->
+    %% If we have a previous rename and haven't started since, give up.
+    case rabbit_file:is_dir(dir()) of
+        true  -> exit({rename_in_progress,
+                       "Restart node under old name to roll back"});
+        false -> ok = rabbit_file:ensure_dir(mnesia_copy_dir())
+    end,
+
+    %% Check we don't have two nodes mapped to the same node
+    {FromNodes, ToNodes} = lists:unzip(NodeMapList),
+    case length(FromNodes) - length(lists:usort(ToNodes)) of
+        0 -> ok;
+        _ -> exit({duplicate_node, ToNodes})
+    end,
+
+    %% Figure out which node we are before and after the change
+    FromNode = case [From || {From, To} <- NodeMapList,
+                             To =:= Node] of
+                   [N] -> N;
+                   []  -> Node
+               end,
+    NodeMap = dict:from_list(NodeMapList),
+    ToNode = case dict:find(FromNode, NodeMap) of
+                 {ok, N2} -> N2;
+                 error    -> FromNode
+             end,
+
+    %% Check that we are in the cluster, all old nodes are in the
+    %% cluster, and no new nodes are.
+    Nodes = rabbit_mnesia:cluster_nodes(all),
+    case {FromNodes -- Nodes, ToNodes -- (ToNodes -- Nodes),
+          lists:member(Node, Nodes ++ ToNodes)} of
+        {[], [], true}  -> ok;
+        {[], [], false} -> exit({i_am_not_involved,        Node});
+        {F,  [], _}     -> exit({nodes_not_in_cluster,     F});
+        {_,  T,  _}     -> exit({nodes_already_in_cluster, T})
+    end,
+    {FromNode, ToNode, NodeMap}.
+
+take_backup(Backup) ->
+    start_mnesia(),
+    ok = mnesia:backup(Backup),
+    stop_mnesia().
+
+restore_backup(Backup) ->
+    ok = mnesia:install_fallback(Backup, [{scope, local}]),
+    start_mnesia(),
+    stop_mnesia(),
+    rabbit_mnesia:force_load_next_boot().
+
+maybe_finish(AllNodes) ->
+    case rabbit_file:read_term_file(rename_config_name()) of
+        {ok, [{FromNode, ToNode}]} -> finish(FromNode, ToNode, AllNodes);
+        _                          -> ok
+    end.
+
+finish(FromNode, ToNode, AllNodes) ->
+    case node() of
+        ToNode ->
+            case rabbit_upgrade:nodes_running(AllNodes) of
+                [] -> finish_primary(FromNode, ToNode);
+                _  -> finish_secondary(FromNode, ToNode, AllNodes)
+            end;
+        FromNode ->
+            rabbit_log:info(
+              "Abandoning rename from ~s to ~s since we are still ~s~n",
+              [FromNode, ToNode, FromNode]),
+            [{ok, _} = file:copy(backup_of_conf(F), F) || F <- config_files()],
+            ok = rabbit_file:recursive_delete([rabbit_mnesia:dir()]),
+            ok = rabbit_file:recursive_copy(
+                   mnesia_copy_dir(), rabbit_mnesia:dir()),
+            delete_rename_files();
+        _ ->
+            %% Boot will almost certainly fail but we might as
+            %% well just log this
+            rabbit_log:info(
+              "Rename attempted from ~s to ~s but we are ~s - ignoring.~n",
+              [FromNode, ToNode, node()])
+    end.
+
+finish_primary(FromNode, ToNode) ->
+    rabbit_log:info("Restarting as primary after rename from ~s to ~s~n",
+                    [FromNode, ToNode]),
+    delete_rename_files(),
+    ok.
+
+finish_secondary(FromNode, ToNode, AllNodes) ->
+    rabbit_log:info("Restarting as secondary after rename from ~s to ~s~n",
+                    [FromNode, ToNode]),
+    rabbit_upgrade:secondary_upgrade(AllNodes),
+    rename_in_running_mnesia(FromNode, ToNode),
+    delete_rename_files(),
+    ok.
+
+dir()                -> rabbit_mnesia:dir() ++ "-rename".
+before_backup_name() -> dir() ++ "/backup-before".
+after_backup_name()  -> dir() ++ "/backup-after".
+rename_config_name() -> dir() ++ "/pending.config".
+mnesia_copy_dir()    -> dir() ++ "/mnesia-copy".
+
+delete_rename_files() -> ok = rabbit_file:recursive_delete([dir()]).
+
+start_mnesia() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+                  rabbit_table:force_load(),
+                  rabbit_table:wait_for_replicated().
+stop_mnesia()  -> stopped = mnesia:stop().
+
+convert_backup(NodeMap, FromBackup, ToBackup) ->
+    mnesia:traverse_backup(
+      FromBackup, ToBackup,
+      fun
+          (Row, Acc) ->
+              case lists:member(element(1, Row), ?CONVERT_TABLES) of
+                  true  -> {[update_term(NodeMap, Row)], Acc};
+                  false -> {[Row], Acc}
+              end
+      end, switched).
+
+config_files() ->
+    [rabbit_node_monitor:running_nodes_filename(),
+     rabbit_node_monitor:cluster_status_filename()].
+
+backup_of_conf(Path) ->
+    filename:join([dir(), filename:basename(Path)]).
+
+convert_config_files(NodeMap) ->
+    [convert_config_file(NodeMap, Path) || Path <- config_files()].
+
+convert_config_file(NodeMap, Path) ->
+    {ok, Term} = rabbit_file:read_term_file(Path),
+    {ok, _} = file:copy(Path, backup_of_conf(Path)),
+    ok = rabbit_file:write_term_file(Path, update_term(NodeMap, Term)).
+
+lookup_node(OldNode, NodeMap) ->
+    case dict:find(OldNode, NodeMap) of
+        {ok, NewNode} -> NewNode;
+        error         -> OldNode
+    end.
+
+mini_map(FromNode, ToNode) -> dict:from_list([{FromNode, ToNode}]).
+
+update_term(NodeMap, L) when is_list(L) ->
+    [update_term(NodeMap, I) || I <- L];
+update_term(NodeMap, T) when is_tuple(T) ->
+    list_to_tuple(update_term(NodeMap, tuple_to_list(T)));
+update_term(NodeMap, Node) when is_atom(Node) ->
+    lookup_node(Node, NodeMap);
+update_term(NodeMap, Pid) when is_pid(Pid) ->
+    rabbit_misc:pid_change_node(Pid, lookup_node(node(Pid), NodeMap));
+update_term(_NodeMap, Term) ->
+    Term.
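%% A minimal illustration (node names and the tuple are hypothetical, not
%% taken from the patch) of the rewriting performed by update_term/2 above:
%% atoms equal to an old node name are replaced wherever they occur inside
%% lists and tuples, and pids are moved to the new node.
example_update_term() ->
    Map = dict:from_list([{'rabbit@oldhost', 'rabbit@newhost'}]),
    'rabbit@newhost' = update_term(Map, 'rabbit@oldhost'),
    {amqqueue, <<"q1">>, ['rabbit@newhost']} =
        update_term(Map, {amqqueue, <<"q1">>, ['rabbit@oldhost']}),
    ok.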
+
+rename_in_running_mnesia(FromNode, ToNode) ->
+    All = rabbit_mnesia:cluster_nodes(all),
+    Running = rabbit_mnesia:cluster_nodes(running),
+    case {lists:member(FromNode, Running), lists:member(ToNode, All)} of
+        {false, true}  -> ok;
+        {true,  _}     -> exit({old_node_running,        FromNode});
+        {_,     false} -> exit({new_node_not_in_cluster, ToNode})
+    end,
+    {atomic, ok} = mnesia:del_table_copy(schema, FromNode),
+    Map = mini_map(FromNode, ToNode),
+    {atomic, _} = transform_table(rabbit_durable_queue, Map),
+    ok.
+
+transform_table(Table, Map) ->
+    mnesia:sync_transaction(
+      fun () ->
+              mnesia:lock({table, Table}, write),
+              transform_table(Table, Map, mnesia:first(Table))
+      end).
+
+transform_table(_Table, _Map, '$end_of_table') ->
+    ok;
+transform_table(Table, Map, Key) ->
+    [Term] = mnesia:read(Table, Key, write),
+    ok = mnesia:write(Table, update_term(Map, Term), write),
+    transform_table(Table, Map, mnesia:next(Table, Key)).
index 2f3ccc355b2a04c325e480e90f49c61bc88943f4..06879b9f6dbfa56d471cb6634ff0bb1db9a106be 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_msg_file).
index d97c2ad55dfe167dc738e8b090dde0ce852af1ba..02a3bd0f15f2747b62e96558b9a96fb5a939c46b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_msg_store).
 -type(client_msstate() :: #client_msstate {
                       server             :: server(),
                       client_ref         :: client_ref(),
-                      file_handle_cache  :: dict(),
+                      file_handle_cache  :: dict:dict(),
                       index_state        :: any(),
                       index_module       :: atom(),
                       dir                :: file:filename(),
         fun ((A) -> 'finished' |
                     {rabbit_types:msg_id(), non_neg_integer(), A})).
 -type(maybe_msg_id_fun() ::
-        'undefined' | fun ((gb_set(), 'written' | 'ignored') -> any())).
+        'undefined' | fun ((gb_sets:set(), 'written' | 'ignored') -> any())).
 -type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')).
 -type(deletion_thunk() :: fun (() -> boolean())).
 
@@ -473,6 +473,7 @@ write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState).
 
 read(MsgId,
      CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) ->
+    file_handle_cache_stats:update(msg_store_read),
     %% Check the cur file cache
     case ets:lookup(CurFileCacheEts, MsgId) of
         [] ->
@@ -507,6 +508,7 @@ server_cast(#client_msstate { server = Server }, Msg) ->
 client_write(MsgId, Msg, Flow,
              CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts,
                                         client_ref         = CRef }) ->
+    file_handle_cache_stats:update(msg_store_write),
     ok = client_update_flying(+1, MsgId, CState),
     ok = update_msg_cache(CurFileCacheEts, MsgId, Msg),
     ok = server_cast(CState, {write, CRef, MsgId, Flow}).
@@ -1299,7 +1301,8 @@ should_mask_action(CRef, MsgId,
 
 open_file(Dir, FileName, Mode) ->
     file_handle_cache:open(form_filename(Dir, FileName), ?BINARY_MODE ++ Mode,
-                           [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]).
+                           [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE},
+                            {read_buffer,  ?HANDLE_CACHE_BUFFER_SIZE}]).
 
 close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) ->
     CState #client_msstate { file_handle_cache = close_handle(Key, FHC) };
index 8af921b1b69a44756e2e4c19976c39f20ae7cad0..f3257e3112584d01e1a6ad3ac284aa188d60b51d 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_msg_store_ets_index).
index ebb51cf7133fd9db3d3c9a23f5e299cc498fab6e..3a5b4d00453b6232102b24c5c795da9f1c01bff1 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_msg_store_gc).
index 5d067cc97f755227d7af3e968137fb0194240416..0c7a37bcd3dfedd9cc8c4c73034060e6b4a8cca3 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_msg_store_index).
index e33c1836787f8839feffdeede8727346a18656d2..1731d489fa66982672752c05af9e894a54099adc 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_net).
index 9082dbd3537e491ca78da7307f18a0acc09deae5..5d877434725efeaf3f1c149709237393c463af19 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_networking).
@@ -26,7 +26,7 @@
 
 %% used by TCP-based transports, e.g. STOMP adapter
 -export([tcp_listener_addresses/1, tcp_listener_spec/6,
-         ensure_ssl/0, ssl_transform_fun/1]).
+         ensure_ssl/0, fix_ssl_options/1, poodle_check/1, ssl_transform_fun/1]).
 
 -export([tcp_listener_started/3, tcp_listener_stopped/3,
          start_client/1, start_ssl_client/2]).
 %% Internal
 -export([connections_local/0]).
 
+-import(rabbit_misc, [pget/2, pget/3, pset/3]).
+
 -include("rabbit.hrl").
 -include_lib("kernel/include/inet.hrl").
 
--define(SSL_TIMEOUT, 5). %% seconds
-
 -define(FIRST_TEST_BIND_PORT, 10000).
 
+%% POODLE
+-define(BAD_SSL_PROTOCOL_VERSIONS, [sslv3]).
+
 %%----------------------------------------------------------------------------
 
 -ifdef(use_specs).
@@ -88,6 +91,8 @@
         (name_prefix(), address(), [gen_tcp:listen_option()], protocol(),
          label(), rabbit_types:mfargs()) -> supervisor:child_spec()).
 -spec(ensure_ssl/0 :: () -> rabbit_types:infos()).
+-spec(fix_ssl_options/1 :: (rabbit_types:infos()) -> rabbit_types:infos()).
+-spec(poodle_check/1 :: (atom()) -> 'ok' | 'danger').
 -spec(ssl_transform_fun/1 ::
         (rabbit_types:infos())
         -> fun ((rabbit_net:socket())
@@ -136,7 +141,10 @@ boot_ssl() ->
             ok;
         {ok, SslListeners} ->
             SslOpts = ensure_ssl(),
-            [start_ssl_listener(Listener, SslOpts) || Listener <- SslListeners],
+            case poodle_check('AMQP') of
+                ok     -> [start_ssl_listener(L, SslOpts) || L <- SslListeners];
+                danger -> ok
+            end,
             ok
     end.
 
@@ -149,13 +157,52 @@ ensure_ssl() ->
     {ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
     ok = app_utils:start_applications(SslAppsConfig),
     {ok, SslOptsConfig} = application:get_env(rabbit, ssl_options),
+    fix_ssl_options(SslOptsConfig).
+
+poodle_check(Context) ->
+    {ok, Vsn} = application:get_key(ssl, vsn),
+    case rabbit_misc:version_compare(Vsn, "5.3", gte) of %% R16B01
+        true  -> ok;
+        false -> case application:get_env(rabbit, ssl_allow_poodle_attack) of
+                     {ok, true}  -> ok;
+                     _           -> log_poodle_fail(Context),
+                                    danger
+                 end
+    end.
 
+log_poodle_fail(Context) ->
+    rabbit_log:error(
+      "The installed version of Erlang (~s) contains the bug OTP-10905,~n"
+      "which makes it impossible to disable SSLv3. This makes the system~n"
+      "vulnerable to the POODLE attack. SSL listeners for ~s have therefore~n"
+      "been disabled.~n~n"
+      "You are advised to upgrade to a recent Erlang version; R16B01 is the~n"
+      "first version in which this bug is fixed, but later is usually~n"
+      "better.~n~n"
+      "If you cannot upgrade now and want to re-enable SSL listeners, you can~n"
+      "set the config item 'ssl_allow_poodle_attack' to 'true' in the~n"
+      "'rabbit' section of your configuration file.~n",
+      [rabbit_misc:otp_release(), Context]).
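%% An illustrative rabbitmq.config sketch (listener port and certificate
%% paths are placeholders) of the 'ssl_allow_poodle_attack' escape hatch
%% referred to in the error message above:
[{rabbit, [{ssl_listeners,           [5671]},
           {ssl_allow_poodle_attack, true},
           {ssl_options, [{cacertfile, "/path/to/cacert.pem"},
                          {certfile,   "/path/to/cert.pem"},
                          {keyfile,    "/path/to/key.pem"}]}]}].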
+
+fix_ssl_options(Config) ->
+    fix_verify_fun(fix_ssl_protocol_versions(Config)).
+
+fix_verify_fun(SslOptsConfig) ->
+    %% Starting with ssl 4.0.1 in Erlang R14B, the verify_fun function
+    %% takes 3 arguments and returns a tuple.
+    {ok, SslAppVer} = application:get_key(ssl, vsn),
+    UseNewVerifyFun = rabbit_misc:version_compare(SslAppVer, "4.0.1", gte),
     case rabbit_misc:pget(verify_fun, SslOptsConfig) of
+        {Module, Function, InitialUserState} ->
+            Fun = make_verify_fun(Module, Function, InitialUserState,
+                                  UseNewVerifyFun),
+            rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
         {Module, Function} ->
-            rabbit_misc:pset(verify_fun,
-                             fun (ErrorList) ->
-                                     Module:Function(ErrorList)
-                             end, SslOptsConfig);
+            Fun = make_verify_fun(Module, Function, none,
+                                  UseNewVerifyFun),
+            rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
+        undefined when UseNewVerifyFun ->
+            SslOptsConfig;
         undefined ->
             % unknown_ca errors are silently ignored prior to R14B unless we
             % supply this verify_fun - remove when at least R14B is required
@@ -168,9 +215,68 @@ ensure_ssl() ->
             end
     end.
 
+make_verify_fun(Module, Function, InitialUserState, UseNewVerifyFun) ->
+    try
+        %% Preload the module: it is required to use
+        %% erlang:function_exported/3.
+        Module:module_info()
+    catch
+        _:Exception ->
+            rabbit_log:error("SSL verify_fun: module ~s missing: ~p~n",
+                             [Module, Exception]),
+            throw({error, {invalid_verify_fun, missing_module}})
+    end,
+    NewForm = erlang:function_exported(Module, Function, 3),
+    OldForm = erlang:function_exported(Module, Function, 1),
+    case {NewForm, OldForm} of
+        {true, _} when UseNewVerifyFun ->
+            %% This verify_fun is supported by Erlang R14B+ (ssl
+            %% 4.0.1 and later).
+            Fun = fun(OtpCert, Event, UserState) ->
+                    Module:Function(OtpCert, Event, UserState)
+            end,
+            {Fun, InitialUserState};
+        {_, true} ->
+            %% This verify_fun is supported by:
+            %%     o  Erlang up to R13B;
+            %%     o  Erlang R14B+ for undocumented backward
+            %%        compatibility.
+            %%
+            %% InitialUserState is ignored in this case.
+            fun(ErrorList) ->
+                    Module:Function(ErrorList)
+            end;
+        {_, false} when not UseNewVerifyFun ->
+            rabbit_log:error("SSL verify_fun: ~s:~s/1 form required "
+              "for Erlang R13B~n", [Module, Function]),
+            throw({error, {invalid_verify_fun, old_form_required}});
+        _ ->
+            Arity = case UseNewVerifyFun of true -> 3; _ -> 1 end,
+            rabbit_log:error("SSL verify_fun: no ~s:~s/~b exported~n",
+              [Module, Function, Arity]),
+            throw({error, {invalid_verify_fun, function_not_exported}})
+    end.
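%% A minimal sketch (module and function names are hypothetical) of the
%% three-argument verify_fun form handled above; it could be referenced
%% from the ssl_options as {verify_fun, {my_ssl_verify, verify, []}}.
-module(my_ssl_verify).
-export([verify/3]).

%% Accept valid certificates, report unknown extensions as such, and fail
%% on anything flagged as a bad certificate.
verify(_OtpCert, {bad_cert, _} = Reason, _UserState) -> {fail, Reason};
verify(_OtpCert, {extension, _}, UserState)          -> {unknown, UserState};
verify(_OtpCert, valid, UserState)                   -> {valid, UserState};
verify(_OtpCert, valid_peer, UserState)              -> {valid, UserState}.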
+
+fix_ssl_protocol_versions(Config) ->
+    case application:get_env(rabbit, ssl_allow_poodle_attack) of
+        {ok, true} ->
+            Config;
+        _ ->
+            Configured = case pget(versions, Config) of
+                             undefined -> pget(available, ssl:versions(), []);
+                             Vs        -> Vs
+                         end,
+            pset(versions, Configured -- ?BAD_SSL_PROTOCOL_VERSIONS, Config)
+    end.
+
+ssl_timeout() ->
+    {ok, Val} = application:get_env(rabbit, ssl_handshake_timeout),
+    Val.
+
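%% The handshake timeout is now read from configuration instead of the old
%% hard-coded ?SSL_TIMEOUT. An illustrative rabbitmq.config entry follows;
%% the value is in milliseconds, since it is passed straight to
%% ssl:ssl_accept/3 and timer:sleep/1 below:
[{rabbit, [{ssl_handshake_timeout, 10000}]}].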
 ssl_transform_fun(SslOpts) ->
     fun (Sock) ->
-            case catch ssl:ssl_accept(Sock, SslOpts, ?SSL_TIMEOUT * 1000) of
+            Timeout = ssl_timeout(),
+            case catch ssl:ssl_accept(Sock, SslOpts, Timeout) of
                 {ok, SslSock} ->
                     {ok, #ssl_socket{tcp = Sock, ssl = SslSock}};
                 {error, timeout} ->
@@ -185,7 +291,7 @@ ssl_transform_fun(SslOpts) ->
                     %% form, according to the TLS spec). So we give
                     %% the ssl_connection a little bit of time to send
                     %% such alerts.
-                    timer:sleep(?SSL_TIMEOUT * 1000),
+                    timer:sleep(Timeout),
                     {error, {ssl_upgrade_error, Reason}};
                 {'EXIT', Reason} ->
                     {error, {ssl_upgrade_failure, Reason}}
@@ -205,7 +311,7 @@ tcp_listener_addresses({Host, Port, Family0})
     [{IPAddress, Port, Family} ||
         {IPAddress, Family} <- getaddr(Host, Family0)];
 tcp_listener_addresses({_Host, Port, _Family0}) ->
-    error_logger:error_msg("invalid port ~p - not 0..65535~n", [Port]),
+    rabbit_log:error("invalid port ~p - not 0..65535~n", [Port]),
     throw({error, {invalid_port, Port}}).
 
 tcp_listener_addresses_auto(Port) ->
@@ -288,7 +394,11 @@ node_listeners(Node) ->
     mnesia:dirty_read(rabbit_listener, Node).
 
 on_node_down(Node) ->
-    ok = mnesia:dirty_delete(rabbit_listener, Node).
+    case lists:member(Node, nodes()) of
+        false -> ok = mnesia:dirty_delete(rabbit_listener, Node);
+        true  -> rabbit_log:info(
+                   "Keep ~s listeners: the node is already back~n", [Node])
+    end.
 
 start_client(Sock, SockTransform) ->
     {ok, _Child, Reader} = supervisor:start_child(rabbit_tcp_client_sup, []),
@@ -392,7 +502,7 @@ gethostaddr(Host, Family) ->
     end.
 
 host_lookup_error(Host, Reason) ->
-    error_logger:error_msg("invalid host ~p - ~p~n", [Host, Reason]),
+    rabbit_log:error("invalid host ~p - ~p~n", [Host, Reason]),
     throw({error, {invalid_host, Host, Reason}}).
 
 resolve_family({_,_,_,_},         auto) -> inet;
index ca843e14f6db552e9e107d36b82b582460cf97b3..e3960c5c8a6902ac5251659229195643b10ede38 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_node_monitor).
          write_cluster_status/1, read_cluster_status/0,
          update_cluster_status/0, reset_cluster_status/0]).
 -export([notify_node_up/0, notify_joined_cluster/0, notify_left_cluster/1]).
--export([partitions/0, partitions/1, subscribe/1]).
--export([pause_minority_guard/0]).
+-export([partitions/0, partitions/1, status/1, subscribe/1]).
+-export([pause_partition_guard/0]).
+-export([global_sync/0]).
 
 %% gen_server callbacks
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
          code_change/3]).
 
  %% Utils
--export([all_rabbit_nodes_up/0, run_outside_applications/1, ping_all/0]).
+-export([all_rabbit_nodes_up/0, run_outside_applications/2, ping_all/0,
+         alive_nodes/1, alive_rabbit_nodes/1]).
 
 -define(SERVER, ?MODULE).
 -define(RABBIT_UP_RPC_TIMEOUT, 2000).
 -define(RABBIT_DOWN_PING_INTERVAL, 1000).
 
--record(state, {monitors, partitions, subscribers, down_ping_timer, autoheal}).
+-record(state, {monitors, partitions, subscribers, down_ping_timer,
+                keepalive_timer, autoheal, guid, node_guids}).
 
 %%----------------------------------------------------------------------------
 
 
 -spec(partitions/0 :: () -> [node()]).
 -spec(partitions/1 :: ([node()]) -> [{node(), [node()]}]).
+-spec(status/1 :: ([node()]) -> {[{node(), [node()]}], [node()]}).
 -spec(subscribe/1 :: (pid()) -> 'ok').
--spec(pause_minority_guard/0 :: () -> 'ok' | 'pausing').
+-spec(pause_partition_guard/0 :: () -> 'ok' | 'pausing').
 
 -spec(all_rabbit_nodes_up/0 :: () -> boolean()).
--spec(run_outside_applications/1 :: (fun (() -> any())) -> pid()).
+-spec(run_outside_applications/2 :: (fun (() -> any()), boolean()) -> pid()).
 -spec(ping_all/0 :: () -> 'ok').
+-spec(alive_nodes/1 :: ([node()]) -> [node()]).
+-spec(alive_rabbit_nodes/1 :: ([node()]) -> [node()]).
 
 -endif.
 
@@ -158,16 +164,7 @@ reset_cluster_status() ->
 %%----------------------------------------------------------------------------
 
 notify_node_up() ->
-    Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
-    gen_server:abcast(Nodes, ?SERVER,
-                      {node_up, node(), rabbit_mnesia:node_type()}),
-    %% register other active rabbits with this rabbit
-    DiskNodes = rabbit_mnesia:cluster_nodes(disc),
-    [gen_server:cast(?SERVER, {node_up, N, case lists:member(N, DiskNodes) of
-                                               true  -> disc;
-                                               false -> ram
-                                           end}) || N <- Nodes],
-    ok.
+    gen_server:cast(?SERVER, notify_node_up).
 
 notify_joined_cluster() ->
     Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
@@ -191,50 +188,142 @@ partitions(Nodes) ->
     {Replies, _} = gen_server:multi_call(Nodes, ?SERVER, partitions, infinity),
     Replies.
 
+status(Nodes) ->
+    gen_server:multi_call(Nodes, ?SERVER, status, infinity).
+
 subscribe(Pid) ->
     gen_server:cast(?SERVER, {subscribe, Pid}).
 
 %%----------------------------------------------------------------------------
-%% pause_minority safety
+%% pause_minority/pause_if_all_down safety
 %%----------------------------------------------------------------------------
 
 %% If we are in a minority and pause_minority mode then a) we are
 %% going to shut down imminently and b) we should not confirm anything
 %% until then, since anything we confirm is likely to be lost.
 %%
-%% We could confirm something by having an HA queue see the minority
+%% The same principles apply to a node which isn't part of the preferred
+%% partition when we are in pause_if_all_down mode.
+%%
+%% We could confirm something by having an HA queue see the pausing
 %% state (and fail over into it) before the node monitor stops us, or
 %% by using unmirrored queues and just having them vanish (and
 %% confirming messages as thrown away).
 %%
 %% So we have channels call in here before issuing confirms, to do a
-%% lightweight check that we have not entered a minority state.
+%% lightweight check that we have not entered a pausing state.
 
-pause_minority_guard() ->
-    case get(pause_minority_guard) of
-        not_minority_mode ->
+pause_partition_guard() ->
+    case get(pause_partition_guard) of
+        not_pause_mode ->
             ok;
         undefined ->
             {ok, M} = application:get_env(rabbit, cluster_partition_handling),
             case M of
-                pause_minority -> pause_minority_guard([]);
-                _              -> put(pause_minority_guard, not_minority_mode),
-                                  ok
+                pause_minority ->
+                    pause_minority_guard([], ok);
+                {pause_if_all_down, PreferredNodes, _} ->
+                    pause_if_all_down_guard(PreferredNodes, [], ok);
+                _ ->
+                    put(pause_partition_guard, not_pause_mode),
+                    ok
             end;
-        {minority_mode, Nodes} ->
-            pause_minority_guard(Nodes)
+        {minority_mode, Nodes, LastState} ->
+            pause_minority_guard(Nodes, LastState);
+        {pause_if_all_down_mode, PreferredNodes, Nodes, LastState} ->
+            pause_if_all_down_guard(PreferredNodes, Nodes, LastState)
     end.
 
-pause_minority_guard(LastNodes) ->
+pause_minority_guard(LastNodes, LastState) ->
     case nodes() of
-        LastNodes -> ok;
-        _         -> put(pause_minority_guard, {minority_mode, nodes()}),
-                     case majority() of
-                         false -> pausing;
-                         true  -> ok
-                     end
+        LastNodes -> LastState;
+        _         -> NewState = case majority() of
+                                    false -> pausing;
+                                    true  -> ok
+                                end,
+                     put(pause_partition_guard,
+                         {minority_mode, nodes(), NewState}),
+                     NewState
+    end.
+
+pause_if_all_down_guard(PreferredNodes, LastNodes, LastState) ->
+    case nodes() of
+        LastNodes -> LastState;
+        _         -> NewState = case in_preferred_partition(PreferredNodes) of
+                                    false -> pausing;
+                                    true  -> ok
+                                end,
+                     put(pause_partition_guard,
+                         {pause_if_all_down_mode, PreferredNodes, nodes(),
+                          NewState}),
+                     NewState
+    end.
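%% An illustrative rabbitmq.config sketch (node names are placeholders) of
%% the new pause_if_all_down mode handled above; the third tuple element
%% selects the recovery behaviour, with 'autoheal' triggering
%% rabbit_autoheal and any other value leaving recovery manual:
[{rabbit,
  [{cluster_partition_handling,
    {pause_if_all_down, ['rabbit@nodeA', 'rabbit@nodeB'], autoheal}}]}].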
+
+%%----------------------------------------------------------------------------
+%% "global" hang workaround.
+%%----------------------------------------------------------------------------
+
+%% This code works around a possible inconsistency in the "global"
+%% state, causing global:sync/0 to never return.
+%%
+%%     1. A process is spawned.
+%%     2. If global:sync() has not returned after 15 seconds, the
+%%        "global" state is parsed.
+%%     3. If it detects that a sync has been blocked for more than
+%%        10 seconds, the process sends fake nodedown/nodeup events
+%%        to the two nodes involved (one local, one remote).
+%%     4. Both "global" instances restart their synchronisation.
+%%     5. global:sync() finally returns.
+%%
+%% FIXME: Remove this workaround once we have got rid of the change to
+%% "dist_auto_connect" and fixed the bugs uncovered.
+
+global_sync() ->
+    Pid = spawn(fun workaround_global_hang/0),
+    ok = global:sync(),
+    Pid ! global_sync_done,
+    ok.
+
+workaround_global_hang() ->
+    receive
+        global_sync_done ->
+            ok
+    after 15000 ->
+            find_blocked_global_peers()
     end.
 
+find_blocked_global_peers() ->
+    {status, _, _, [Dict | _]} = sys:get_status(global_name_server),
+    find_blocked_global_peers1(Dict).
+
+find_blocked_global_peers1([{{sync_tag_his, Peer}, Timestamp} | Rest]) ->
+    Diff = timer:now_diff(erlang:now(), Timestamp),
+    if
+        Diff >= 10000 -> unblock_global_peer(Peer);
+        true          -> ok
+    end,
+    find_blocked_global_peers1(Rest);
+find_blocked_global_peers1([_ | Rest]) ->
+    find_blocked_global_peers1(Rest);
+find_blocked_global_peers1([]) ->
+    ok.
+
+unblock_global_peer(PeerNode) ->
+    ThisNode = node(),
+    PeerState = rpc:call(PeerNode, sys, get_status, [global_name_server]),
+    error_logger:info_msg(
+      "Global hang workaround: global state on ~s seems broken~n"
+      " * Peer global state:  ~p~n"
+      " * Local global state: ~p~n"
+      "Faking nodedown/nodeup between ~s and ~s~n",
+      [PeerNode, PeerState, sys:get_status(global_name_server),
+       PeerNode, ThisNode]),
+    {global_name_server, ThisNode} ! {nodedown, PeerNode},
+    {global_name_server, PeerNode} ! {nodedown, ThisNode},
+    {global_name_server, ThisNode} ! {nodeup, PeerNode},
+    {global_name_server, PeerNode} ! {nodeup, ThisNode},
+    ok.
+
 %%----------------------------------------------------------------------------
 %% gen_server callbacks
 %%----------------------------------------------------------------------------
@@ -247,17 +336,146 @@ init([]) ->
     process_flag(trap_exit, true),
     net_kernel:monitor_nodes(true, [nodedown_reason]),
     {ok, _} = mnesia:subscribe(system),
-    {ok, #state{monitors    = pmon:new(),
-                subscribers = pmon:new(),
-                partitions  = [],
-                autoheal    = rabbit_autoheal:init()}}.
+    {ok, ensure_keepalive_timer(#state{monitors    = pmon:new(),
+                                       subscribers = pmon:new(),
+                                       partitions  = [],
+                                       guid        = rabbit_guid:gen(),
+                                       node_guids  = orddict:new(),
+                                       autoheal    = rabbit_autoheal:init()})}.
 
 handle_call(partitions, _From, State = #state{partitions = Partitions}) ->
     {reply, Partitions, State};
 
+handle_call(status, _From, State = #state{partitions = Partitions}) ->
+    {reply, [{partitions, Partitions},
+             {nodes,      [node() | nodes()]}], State};
+
 handle_call(_Request, _From, State) ->
     {noreply, State}.
 
+handle_cast(notify_node_up, State = #state{guid = GUID}) ->
+    Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
+    gen_server:abcast(Nodes, ?SERVER,
+                      {node_up, node(), rabbit_mnesia:node_type(), GUID}),
+    %% register other active rabbits with this rabbit
+    DiskNodes = rabbit_mnesia:cluster_nodes(disc),
+    [gen_server:cast(?SERVER, {node_up, N, case lists:member(N, DiskNodes) of
+                                               true  -> disc;
+                                               false -> ram
+                                           end}) || N <- Nodes],
+    {noreply, State};
+
+%%----------------------------------------------------------------------------
+%% Partial partition detection
+%%
+%% Every node generates a GUID each time it starts, and announces that
+%% GUID in 'node_up', with 'announce_guid' sent by return so the new
+%% node knows the GUIDs of the others. These GUIDs are sent in all the
+%% partial partition related messages to ensure that we ignore partial
+%% partition messages from before we restarted (to avoid getting stuck
+%% in a loop).
+%%
+%% When one node gets nodedown from another, it then sends
+%% 'check_partial_partition' to all the nodes it still thinks are
+%% alive. If any of those (intermediate) nodes still see the "down"
+%% node as up, they inform it that this has happened. The original
+%% node (in 'ignore', 'pause_if_all_down' or 'autoheal' mode) will then
+%% disconnect from the intermediate node to "upgrade" to a full
+%% partition.
+%%
+%% In pause_minority mode it will instead immediately pause until all
+%% nodes come back. This is because the contract for pause_minority is
+%% that nodes should never sit in a partitioned state - if it just
+%% disconnected, it would become a minority, pause, realise it's not
+%% in a minority any more, and come back, still partitioned (albeit no
+%% longer partially).
+%% ----------------------------------------------------------------------------
+
+handle_cast({node_up, Node, NodeType, GUID},
+            State = #state{guid       = MyGUID,
+                           node_guids = GUIDs}) ->
+    cast(Node, {announce_guid, node(), MyGUID}),
+    GUIDs1 = orddict:store(Node, GUID, GUIDs),
+    handle_cast({node_up, Node, NodeType}, State#state{node_guids = GUIDs1});
+
+handle_cast({announce_guid, Node, GUID}, State = #state{node_guids = GUIDs}) ->
+    {noreply, State#state{node_guids = orddict:store(Node, GUID, GUIDs)}};
+
+handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
+            State = #state{guid       = MyGUID,
+                           node_guids = GUIDs}) ->
+    case lists:member(Node, rabbit_mnesia:cluster_nodes(running)) andalso
+        orddict:find(Node, GUIDs) =:= {ok, NodeGUID} of
+        true  -> spawn_link( %%[1]
+                   fun () ->
+                           case rpc:call(Node, rabbit, is_running, []) of
+                               {badrpc, _} -> ok;
+                               _           -> cast(Rep, {partial_partition,
+                                                         Node, node(), RepGUID})
+                           end
+                   end);
+        false -> ok
+    end,
+    {noreply, State};
+%% [1] We checked that we haven't heard the node go down - but we
+%% really should make sure we can actually communicate with
+%% it. Otherwise there's a race where we falsely detect a partial
+%% partition.
+%%
+%% Now of course the rpc:call/4 may take a long time to return if
+%% connectivity with the node is actually interrupted - but that's OK,
+%% we only really want to do something in a timely manner if
+%% connectivity is OK. However, of course as always we must not block
+%% the node monitor, so we do the check in a separate process.
+
+handle_cast({check_partial_partition, _Node, _Reporter,
+             _NodeGUID, _GUID, _ReporterGUID}, State) ->
+    {noreply, State};
+
+handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
+            State = #state{guid = MyGUID}) ->
+    FmtBase = "Partial partition detected:~n"
+        " * We saw DOWN from ~s~n"
+        " * We can still see ~s which can see ~s~n",
+    ArgsBase = [NotReallyDown, Proxy, NotReallyDown],
+    case application:get_env(rabbit, cluster_partition_handling) of
+        {ok, pause_minority} ->
+            rabbit_log:error(
+              FmtBase ++ " * pause_minority mode enabled~n"
+              "We will therefore pause until the *entire* cluster recovers~n",
+              ArgsBase),
+            await_cluster_recovery(fun all_nodes_up/0),
+            {noreply, State};
+        {ok, {pause_if_all_down, PreferredNodes, _}} ->
+            case in_preferred_partition(PreferredNodes) of
+                true  -> rabbit_log:error(
+                           FmtBase ++ "We will therefore intentionally "
+                           "disconnect from ~s~n", ArgsBase ++ [Proxy]),
+                         upgrade_to_full_partition(Proxy);
+                false -> rabbit_log:info(
+                           FmtBase ++ "We are about to pause, no need "
+                           "for further actions~n", ArgsBase)
+            end,
+            {noreply, State};
+        {ok, _} ->
+            rabbit_log:error(
+              FmtBase ++ "We will therefore intentionally disconnect from ~s~n",
+              ArgsBase ++ [Proxy]),
+            upgrade_to_full_partition(Proxy),
+            {noreply, State}
+    end;
+
+handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) ->
+    {noreply, State};
+
+%% Sometimes it appears the Erlang VM does not give us nodedown
+%% messages reliably when another node disconnects from us. Therefore
+%% we are told just before the disconnection so we can reciprocate.
+handle_cast({partial_partition_disconnect, Other}, State) ->
+    rabbit_log:error("Partial partition disconnect from ~s~n", [Other]),
+    disconnect(Other),
+    {noreply, State};
+
 %% Note: when updating the status file, we can't simply write the
 %% mnesia information since the message can (and will) overtake the
 %% mnesia propagation.
@@ -274,9 +492,10 @@ handle_cast({node_up, Node, NodeType},
                                        end,
                                        add_node(Node, RunningNodes)}),
                  ok = handle_live_rabbit(Node),
-                 {noreply, State#state{
-                             monitors = pmon:monitor({rabbit, Node}, Monitors)}}
+                 Monitors1 = pmon:monitor({rabbit, Node}, Monitors),
+                 {noreply, maybe_autoheal(State#state{monitors = Monitors1})}
     end;
+
 handle_cast({joined_cluster, Node, NodeType}, State) ->
     {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
     write_cluster_status({add_node(Node, AllNodes),
@@ -286,13 +505,19 @@ handle_cast({joined_cluster, Node, NodeType}, State) ->
                           end,
                           RunningNodes}),
     {noreply, State};
+
 handle_cast({left_cluster, Node}, State) ->
     {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
     write_cluster_status({del_node(Node, AllNodes), del_node(Node, DiscNodes),
                           del_node(Node, RunningNodes)}),
     {noreply, State};
+
 handle_cast({subscribe, Pid}, State = #state{subscribers = Subscribers}) ->
     {noreply, State#state{subscribers = pmon:monitor(Pid, Subscribers)}};
+
+handle_cast(keepalive, State) ->
+    {noreply, State};
+
 handle_cast(_Msg, State) ->
     {noreply, State}.
 
@@ -310,16 +535,33 @@ handle_info({'DOWN', _MRef, process, Pid, _Reason},
             State = #state{subscribers = Subscribers}) ->
     {noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};
 
-handle_info({nodedown, Node, Info}, State) ->
+handle_info({nodedown, Node, Info}, State = #state{guid       = MyGUID,
+                                                   node_guids = GUIDs}) ->
     rabbit_log:info("node ~p down: ~p~n",
                     [Node, proplists:get_value(nodedown_reason, Info)]),
+    Check = fun (N, CheckGUID, DownGUID) ->
+                    cast(N, {check_partial_partition,
+                             Node, node(), DownGUID, CheckGUID, MyGUID})
+            end,
+    case orddict:find(Node, GUIDs) of
+        {ok, DownGUID} -> Alive = rabbit_mnesia:cluster_nodes(running)
+                              -- [node(), Node],
+                          [case orddict:find(N, GUIDs) of
+                               {ok, CheckGUID} -> Check(N, CheckGUID, DownGUID);
+                               error           -> ok
+                           end || N <- Alive];
+        error          -> ok
+    end,
     {noreply, handle_dead_node(Node, State)};
 
+handle_info({nodeup, Node, _Info}, State) ->
+    rabbit_log:info("node ~p up~n", [Node]),
+    {noreply, State};
+
 handle_info({mnesia_system_event,
              {inconsistent_database, running_partitioned_network, Node}},
             State = #state{partitions = Partitions,
-                           monitors   = Monitors,
-                           autoheal   = AState}) ->
+                           monitors   = Monitors}) ->
     %% We will not get a node_up from this node - yet we should treat it as
     %% up (mostly).
     State1 = case pmon:is_monitored({rabbit, Node}, Monitors) of
@@ -328,17 +570,15 @@ handle_info({mnesia_system_event,
                             monitors = pmon:monitor({rabbit, Node}, Monitors)}
              end,
     ok = handle_live_rabbit(Node),
-    Partitions1 = ordsets:to_list(
-                    ordsets:add_element(Node, ordsets:from_list(Partitions))),
-    {noreply, State1#state{partitions = Partitions1,
-                           autoheal   = rabbit_autoheal:maybe_start(AState)}};
+    Partitions1 = lists:usort([Node | Partitions]),
+    {noreply, maybe_autoheal(State1#state{partitions = Partitions1})};
 
 handle_info({autoheal_msg, Msg}, State = #state{autoheal   = AState,
                                                 partitions = Partitions}) ->
     AState1 = rabbit_autoheal:handle_msg(Msg, AState, Partitions),
     {noreply, State#state{autoheal = AState1}};
 
-handle_info(ping_nodes, State) ->
+handle_info(ping_down_nodes, State) ->
     %% We ping nodes when some are down to ensure that we find out
     %% about healed partitions quickly. We ping all nodes rather than
     %% just the ones we know are down for simplicity; it's not expensive
@@ -352,14 +592,20 @@ handle_info(ping_nodes, State) ->
                        ping_all(),
                        case all_nodes_up() of
                            true  -> ok;
-                           false -> Self ! ping_again
+                           false -> Self ! ping_down_nodes_again
                        end
                end),
     {noreply, State1};
 
-handle_info(ping_again, State) ->
+handle_info(ping_down_nodes_again, State) ->
     {noreply, ensure_ping_timer(State)};
 
+handle_info(ping_up_nodes, State) ->
+    %% In this case we need to ensure that we ping "quickly" -
+    %% i.e. only nodes that we know to be up.
+    [cast(N, keepalive) || N <- alive_nodes() -- [node()]],
+    {noreply, ensure_keepalive_timer(State#state{keepalive_timer = undefined})};
+
 handle_info(_Info, State) ->
     {noreply, State}.
 
@@ -380,17 +626,29 @@ handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
     %% that we can respond in the same way to "rabbitmqctl stop_app"
     %% and "rabbitmqctl stop" as much as possible.
     %%
-    %% However, for pause_minority mode we can't do this, since we
-    %% depend on looking at whether other nodes are up to decide
-    %% whether to come back up ourselves - if we decide that based on
-    %% the rabbit application we would go down and never come back.
+    %% However, for pause_minority and pause_if_all_down modes we can't do
+    %% this, since we depend on looking at whether other nodes are up
+    %% to decide whether to come back up ourselves - if we decide that
+    %% based on the rabbit application we would go down and never come
+    %% back.
     case application:get_env(rabbit, cluster_partition_handling) of
         {ok, pause_minority} ->
-            case majority() of
+            case majority([Node]) of
                 true  -> ok;
-                false -> await_cluster_recovery()
+                false -> await_cluster_recovery(fun majority/0)
             end,
             State;
+        {ok, {pause_if_all_down, PreferredNodes, HowToRecover}} ->
+            case in_preferred_partition(PreferredNodes, [Node]) of
+                true  -> ok;
+                false -> await_cluster_recovery(
+                           fun in_preferred_partition/0)
+            end,
+            case HowToRecover of
+                autoheal -> State#state{autoheal =
+                              rabbit_autoheal:node_down(Node, Autoheal)};
+                _        -> State
+            end;
         {ok, ignore} ->
             State;
         {ok, autoheal} ->
@@ -401,34 +659,63 @@ handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
             State
     end.
 
-await_cluster_recovery() ->
-    rabbit_log:warning("Cluster minority status detected - awaiting recovery~n",
-                       []),
+await_cluster_recovery(Condition) ->
+    rabbit_log:warning("Cluster minority/secondary status detected - "
+                       "awaiting recovery~n", []),
     run_outside_applications(fun () ->
                                      rabbit:stop(),
-                                     wait_for_cluster_recovery()
-                             end),
+                                     wait_for_cluster_recovery(Condition)
+                             end, false),
     ok.
 
-run_outside_applications(Fun) ->
+run_outside_applications(Fun, WaitForExistingProcess) ->
     spawn(fun () ->
                   %% If our group leader is inside an application we are about
                   %% to stop, application:stop/1 does not return.
                   group_leader(whereis(init), self()),
-                  %% Ensure only one such process at a time, the
-                  %% exit(badarg) is harmless if one is already running
-                  try register(rabbit_outside_app_process, self()) of
-                      true           -> Fun()
-                  catch error:badarg -> ok
-                  end
+                  register_outside_app_process(Fun, WaitForExistingProcess)
           end).
 
-wait_for_cluster_recovery() ->
+register_outside_app_process(Fun, WaitForExistingProcess) ->
+    %% Ensure only one such process at a time, the exit(badarg) is
+    %% harmless if one is already running.
+    %%
+    %% If WaitForExistingProcess is false, the given fun is simply not
+    %% executed at all and the process exits.
+    %%
+    %% If WaitForExistingProcess is true, we wait for the end of the
+    %% currently running process before executing the given function.
+    try register(rabbit_outside_app_process, self()) of
+        true ->
+            do_run_outside_app_fun(Fun)
+    catch
+        error:badarg when WaitForExistingProcess ->
+            MRef = erlang:monitor(process, rabbit_outside_app_process),
+            receive
+                {'DOWN', MRef, _, _, _} ->
+                    %% The existing process exited, let's try to
+                    %% register again.
+                    register_outside_app_process(Fun, WaitForExistingProcess)
+            end;
+        error:badarg ->
+            ok
+    end.
+
+do_run_outside_app_fun(Fun) ->
+    try
+        Fun()
+    catch _:E ->
+            rabbit_log:error(
+              "rabbit_outside_app_process:~n~p~n~p~n",
+              [E, erlang:get_stacktrace()])
+    end.
+
+wait_for_cluster_recovery(Condition) ->
     ping_all(),
-    case majority() of
+    case Condition() of
         true  -> rabbit:start();
         false -> timer:sleep(?RABBIT_DOWN_PING_INTERVAL),
-                 wait_for_cluster_recovery()
+                 wait_for_cluster_recovery(Condition)
     end.
 
 handle_dead_rabbit(Node, State = #state{partitions = Partitions,
@@ -445,7 +732,9 @@ handle_dead_rabbit(Node, State = #state{partitions = Partitions,
     %% that we do not attempt to deal with individual (other) partitions
     %% going away. It's only safe to forget anything about partitions when
     %% there are no partitions.
-    Partitions1 = case Partitions -- (Partitions -- alive_rabbit_nodes()) of
+    Down = Partitions -- alive_rabbit_nodes(),
+    NoLongerPartitioned = rabbit_mnesia:cluster_nodes(running),
+    Partitions1 = case Partitions -- Down -- NoLongerPartitioned of
                       [] -> [];
                       _  -> Partitions
                   end,
@@ -455,12 +744,28 @@ handle_dead_rabbit(Node, State = #state{partitions = Partitions,
 
 ensure_ping_timer(State) ->
     rabbit_misc:ensure_timer(
-      State, #state.down_ping_timer, ?RABBIT_DOWN_PING_INTERVAL, ping_nodes).
+      State, #state.down_ping_timer, ?RABBIT_DOWN_PING_INTERVAL,
+      ping_down_nodes).
+
+ensure_keepalive_timer(State) ->
+    {ok, Interval} = application:get_env(rabbit, cluster_keepalive_interval),
+    rabbit_misc:ensure_timer(
+      State, #state.keepalive_timer, Interval, ping_up_nodes).
 
 handle_live_rabbit(Node) ->
+    ok = rabbit_amqqueue:on_node_up(Node),
     ok = rabbit_alarm:on_node_up(Node),
     ok = rabbit_mnesia:on_node_up(Node).
 
+maybe_autoheal(State = #state{partitions = []}) ->
+    State;
+
+maybe_autoheal(State = #state{autoheal = AState}) ->
+    case all_nodes_up() of
+        true  -> State#state{autoheal = rabbit_autoheal:maybe_start(AState)};
+        false -> State
+    end.
+
 %%--------------------------------------------------------------------
 %% Internal utils
 %%--------------------------------------------------------------------
@@ -488,22 +793,60 @@ add_node(Node, Nodes) -> lists:usort([Node | Nodes]).
 
 del_node(Node, Nodes) -> Nodes -- [Node].
 
+cast(Node, Msg) -> gen_server:cast({?SERVER, Node}, Msg).
+
+upgrade_to_full_partition(Proxy) ->
+    cast(Proxy, {partial_partition_disconnect, node()}),
+    disconnect(Proxy).
+
+%% When we call this, it's because we want to force Mnesia to detect a
+%% partition. But if we just disconnect_node/1 then Mnesia won't
+%% detect a very short partition. So we want to force a slightly
+%% longer disconnect. Unfortunately we don't have a way to blacklist
+%% individual nodes; the best we can do is turn off auto-connect
+%% altogether.
+disconnect(Node) ->
+    application:set_env(kernel, dist_auto_connect, never),
+    erlang:disconnect_node(Node),
+    timer:sleep(1000),
+    application:unset_env(kernel, dist_auto_connect),
+    ok.
+
 %%--------------------------------------------------------------------
 
 %% mnesia:system_info(db_nodes) (and hence
-%% rabbit_mnesia:cluster_nodes(running)) does not give reliable
-%% results when partitioned. So we have a small set of replacement
-%% functions here. "rabbit" in a function's name implies we test if
-%% the rabbit application is up, not just the node.
+%% rabbit_mnesia:cluster_nodes(running)) does not return all nodes
+%% when partitioned, just those that we are sharing Mnesia state
+%% with. So we have a small set of replacement functions
+%% here. "rabbit" in a function's name implies we test if the rabbit
+%% application is up, not just the node.
 
-%% As we use these functions to decide what to do in pause_minority
-%% state, they *must* be fast, even in the case where TCP connections
-%% are timing out. So that means we should be careful about whether we
-%% connect to nodes which are currently disconnected.
+%% As we use these functions to decide what to do in pause_minority or
+%% pause_if_all_down states, they *must* be fast, even in the case where
+%% TCP connections are timing out. So that means we should be careful
+%% about whether we connect to nodes which are currently disconnected.
 
 majority() ->
+    majority([]).
+
+majority(NodesDown) ->
+    Nodes = rabbit_mnesia:cluster_nodes(all),
+    AliveNodes = alive_nodes(Nodes) -- NodesDown,
+    length(AliveNodes) / length(Nodes) > 0.5.
+
+in_preferred_partition() ->
+    {ok, {pause_if_all_down, PreferredNodes, _}} =
+        application:get_env(rabbit, cluster_partition_handling),
+    in_preferred_partition(PreferredNodes).
+
+in_preferred_partition(PreferredNodes) ->
+    in_preferred_partition(PreferredNodes, []).
+
+in_preferred_partition(PreferredNodes, NodesDown) ->
     Nodes = rabbit_mnesia:cluster_nodes(all),
-    length(alive_nodes(Nodes)) / length(Nodes) > 0.5.
+    RealPreferredNodes = [N || N <- PreferredNodes, lists:member(N, Nodes)],
+    AliveNodes = alive_nodes(RealPreferredNodes) -- NodesDown,
+    RealPreferredNodes =:= [] orelse AliveNodes =/= [].
 
 all_nodes_up() ->
     Nodes = rabbit_mnesia:cluster_nodes(all),
@@ -513,6 +856,7 @@ all_rabbit_nodes_up() ->
     Nodes = rabbit_mnesia:cluster_nodes(all),
     length(alive_rabbit_nodes(Nodes)) =:= length(Nodes).
 
+alive_nodes() -> alive_nodes(rabbit_mnesia:cluster_nodes(all)).
 alive_nodes(Nodes) -> [N || N <- Nodes, lists:member(N, [node()|nodes()])].
 
 alive_rabbit_nodes() -> alive_rabbit_nodes(rabbit_mnesia:cluster_nodes(all)).
index 7f7fcc3126be1cd4b354256db7eb3a725fb0f996..090aacc63cacd56e4e1c245589845477636086d5 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_nodes).
 
 -export([names/1, diagnostics/1, make/1, parts/1, cookie_hash/0,
          is_running/2, is_process_running/2,
-         cluster_name/0, set_cluster_name/1]).
+         cluster_name/0, set_cluster_name/1, ensure_epmd/0]).
 
 -include_lib("kernel/include/inet.hrl").
 
@@ -41,6 +41,7 @@
 -spec(is_process_running/2 :: (node(), atom()) -> boolean()).
 -spec(cluster_name/0 :: () -> binary()).
 -spec(set_cluster_name/1 :: (binary()) -> 'ok').
+-spec(ensure_epmd/0 :: () -> 'ok').
 
 -endif.
 
@@ -136,7 +137,8 @@ dist_broken_diagnostics(Name, Host, NamePorts) ->
                      [{"  * TCP connection succeeded but Erlang distribution "
                        "failed~n"
                        "  * suggestion: hostname mismatch?~n"
-                       "  * suggestion: is the cookie set correctly?", []}];
+                       "  * suggestion: is the cookie set correctly?~n"
+                       "  * suggestion: is the Erlang distribution using TLS?", []}];
                  {error, Reason} ->
                      [{"  * can't establish TCP connection, reason: ~s~n"
                        "  * suggestion: blocked by firewall?",
@@ -197,3 +199,19 @@ cluster_name_default() ->
 
 set_cluster_name(Name) ->
     rabbit_runtime_parameters:set_global(cluster_name, Name).
+
+ensure_epmd() ->
+    {ok, Prog} = init:get_argument(progname),
+    ID = random:uniform(1000000000),
+    Port = open_port(
+             {spawn_executable, os:find_executable(Prog)},
+             [{args, ["-sname", rabbit_misc:format("epmd-starter-~b", [ID]),
+                      "-noshell", "-eval", "halt()."]},
+              exit_status, stderr_to_stdout, use_stdio]),
+    port_shutdown_loop(Port).
+
+port_shutdown_loop(Port) ->
+    receive
+        {Port, {exit_status, _Rc}} -> ok;
+        {Port, _}                  -> port_shutdown_loop(Port)
+    end.
index c42bcc4adcca33dc25f7d6f2bf838f24b0654957..a54f02bf06baab69d5eeb6e03fa8562eab3105b7 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_parameter_validation).
index c0fb05e24771eb1fe394107d9541a85e49136174..329703234640be1e3d12afb6bbb229521095cde5 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_plugins).
 -include("rabbit.hrl").
 
 -export([setup/0, active/0, read_enabled/1, list/1, dependencies/3]).
+-export([ensure/1]).
 
 %%----------------------------------------------------------------------------
 
 -spec(read_enabled/1 :: (file:filename()) -> [plugin_name()]).
 -spec(dependencies/3 :: (boolean(), [plugin_name()], [#plugin{}]) ->
                              [plugin_name()]).
-
+-spec(ensure/1  :: (string()) -> {'ok', [atom()], [atom()]} | {error, any()}).
 -endif.
 
 %%----------------------------------------------------------------------------
 
+ensure(FileJustChanged0) ->
+    {ok, OurFile0} = application:get_env(rabbit, enabled_plugins_file),
+    FileJustChanged = filename:nativename(FileJustChanged0),
+    OurFile = filename:nativename(OurFile0),
+    case OurFile of
+        FileJustChanged ->
+            Enabled = read_enabled(OurFile),
+            Wanted = prepare_plugins(Enabled),
+            Current = active(),
+            Start = Wanted -- Current,
+            Stop = Current -- Wanted,
+            rabbit:start_apps(Start),
+            %% We need sync_notify here since mgmt will attempt to look at all
+            %% the modules for the disabled plugins - if they are unloaded
+            %% that won't work.
+            ok = rabbit_event:sync_notify(plugins_changed, [{enabled,  Start},
+                                                            {disabled, Stop}]),
+            rabbit:stop_apps(Stop),
+            clean_plugins(Stop),
+            rabbit_log:info("Plugins changed; enabled ~p, disabled ~p~n",
+                            [Start, Stop]),
+            {ok, Start, Stop};
+        _ ->
+            {error, {enabled_plugins_mismatch, FileJustChanged, OurFile}}
+    end.
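%% A hedged usage sketch (the path is illustrative): after the
%% enabled_plugins file on this node has been rewritten, a caller can apply
%% the change to the running broker without a restart:
{ok, Started, Stopped} = rabbit_plugins:ensure("/etc/rabbitmq/enabled_plugins"),
rabbit_log:info("started ~p, stopped ~p~n", [Started, Stopped]).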
+
 %% @doc Prepares the file system and installs all enabled plugins.
 setup() ->
-    {ok, PluginDir}   = application:get_env(rabbit, plugins_dir),
     {ok, ExpandDir}   = application:get_env(rabbit, plugins_expand_dir),
+
+    %% Eliminate the contents of the destination directory
+    case delete_recursively(ExpandDir) of
+        ok          -> ok;
+        {error, E1} -> throw({error, {cannot_delete_plugins_expand_dir,
+                                      [ExpandDir, E1]}})
+    end,
+
     {ok, EnabledFile} = application:get_env(rabbit, enabled_plugins_file),
-    prepare_plugins(EnabledFile, PluginDir, ExpandDir).
+    Enabled = read_enabled(EnabledFile),
+    prepare_plugins(Enabled).
 
 %% @doc Lists the plugins which are currently running.
 active() ->
     {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
-    InstalledPlugins = [ P#plugin.name || P <- list(ExpandDir) ],
+    InstalledPlugins = plugin_names(list(ExpandDir)),
     [App || {App, _, _} <- rabbit_misc:which_applications(),
             lists:member(App, InstalledPlugins)].
 
@@ -55,7 +90,7 @@ list(PluginsDir) ->
     EZs = [{ez, EZ} || EZ <- filelib:wildcard("*.ez", PluginsDir)],
     FreeApps = [{app, App} ||
                    App <- filelib:wildcard("*/ebin/*.app", PluginsDir)],
-    {Plugins, Problems} =
+    {AvailablePlugins, Problems} =
         lists:foldl(fun ({error, EZ, Reason}, {Plugins1, Problems1}) ->
                             {Plugins1, [{EZ, Reason} | Problems1]};
                         (Plugin = #plugin{}, {Plugins1, Problems1}) ->
@@ -64,10 +99,12 @@ list(PluginsDir) ->
                     [plugin_info(PluginsDir, Plug) || Plug <- EZs ++ FreeApps]),
     case Problems of
         [] -> ok;
-        _  -> error_logger:warning_msg(
+        _  -> rabbit_log:warning(
                 "Problem reading some plugins: ~p~n", [Problems])
     end,
-    Plugins.
+    Plugins = lists:filter(fun(P) -> not plugin_provided_by_otp(P) end,
+                           AvailablePlugins),
+    ensure_dependencies(Plugins).
 
 %% @doc Read the list of enabled plugins from the supplied term file.
 read_enabled(PluginsFile) ->
@@ -86,15 +123,10 @@ read_enabled(PluginsFile) ->
 %% the resulting list, otherwise they're skipped.
 dependencies(Reverse, Sources, AllPlugins) ->
     {ok, G} = rabbit_misc:build_acyclic_graph(
-                fun (App, _Deps) -> [{App, App}] end,
-                fun (App,  Deps) -> [{App, Dep} || Dep <- Deps] end,
-                lists:ukeysort(
-                  1, [{Name, Deps} ||
-                         #plugin{name         = Name,
-                                 dependencies = Deps} <- AllPlugins] ++
-                      [{Dep,   []} ||
-                          #plugin{dependencies = Deps} <- AllPlugins,
-                          Dep                          <- Deps])),
+                fun ({App, _Deps}) -> [{App, App}] end,
+                fun ({App,  Deps}) -> [{App, Dep} || Dep <- Deps] end,
+                [{Name, Deps} || #plugin{name         = Name,
+                                         dependencies = Deps} <- AllPlugins]),
     Dests = case Reverse of
                 false -> digraph_utils:reachable(Sources, G);
                 true  -> digraph_utils:reaching(Sources, G)
@@ -102,41 +134,101 @@ dependencies(Reverse, Sources, AllPlugins) ->
     true = digraph:delete(G),
     Dests.
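%% [Editor's illustrative sketch, not part of this change: the dependency
%% closure computed above can be reproduced with the stock digraph modules
%% alone. The module name and the plugin names below are invented.]
-module(plugin_deps_example).
-export([closure/2]).

%% Deps is a list of {App, [Dependency]} pairs covering every known plugin;
%% Sources is the list of explicitly enabled plugins. Returns Sources plus
%% all of their transitive dependencies.
closure(Sources, Deps) ->
    G = digraph:new([acyclic]),
    [digraph:add_vertex(G, App) || {App, _} <- Deps],
    [digraph:add_edge(G, App, Dep) || {App, Ds} <- Deps, Dep <- Ds],
    Reachable = digraph_utils:reachable(Sources, G),
    true = digraph:delete(G),
    Reachable.

%% closure([rabbitmq_management],
%%         [{rabbitmq_management,   [rabbitmq_web_dispatch, amqp_client]},
%%          {rabbitmq_web_dispatch, []},
%%          {amqp_client,           []}])
%% returns all three names (in no particular order).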
 
+%% For a few known cases, an application provided externally (by
+%% Erlang/OTP itself) can be trusted; when it is available, it takes
+%% precedence over the bundled plugin of the same name.
+plugin_provided_by_otp(#plugin{name = eldap}) ->
+    %% eldap was added to Erlang/OTP R15B01 (ERTS 5.9.1). In this case,
+    %% we prefer this version to the plugin.
+    rabbit_misc:version_compare(erlang:system_info(version), "5.9.1", gte);
+plugin_provided_by_otp(_) ->
+    false.
+
+%% Make sure we don't list OTP apps in here, and also that we detect
+%% missing dependencies.
+ensure_dependencies(Plugins) ->
+    Names = plugin_names(Plugins),
+    NotThere = [Dep || #plugin{dependencies = Deps} <- Plugins,
+                       Dep                          <- Deps,
+                       not lists:member(Dep, Names)],
+    {OTP, Missing} = lists:partition(fun is_loadable/1, lists:usort(NotThere)),
+    case Missing of
+        [] -> ok;
+        _  -> Blame = [Name || #plugin{name         = Name,
+                                       dependencies = Deps} <- Plugins,
+                               lists:any(fun (Dep) ->
+                                                 lists:member(Dep, Missing)
+                                         end, Deps)],
+              throw({error, {missing_dependencies, Missing, Blame}})
+    end,
+    [P#plugin{dependencies = Deps -- OTP}
+     || P = #plugin{dependencies = Deps} <- Plugins].
+
+is_loadable(App) ->
+    case application:load(App) of
+        {error, {already_loaded, _}} -> true;
+        ok                           -> application:unload(App),
+                                        true;
+        _                            -> false
+    end.
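%% [Editor's illustrative sketch, not part of this change: the
%% load/probe/unload trick above, used on its own to split a list of
%% unknown dependency names into "resolvable from the code path" and
%% "genuinely missing", which is what the OTP/Missing partition in
%% ensure_dependencies/1 does. The module name and app names are
%% examples only.]
-module(dep_partition_example).
-export([split/1]).

%% Returns {FoundOnCodePath, Missing}.
split(Apps) ->
    lists:partition(fun loadable/1, lists:usort(Apps)).

loadable(App) ->
    case application:load(App) of
        {error, {already_loaded, _}} -> true;
        ok                           -> ok = application:unload(App),
                                        true;
        _                            -> false
    end.

%% On a stock OTP install, split([ssl, stdlib, no_such_app]) would be
%% expected to return {[ssl, stdlib], [no_such_app]}.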
+
 %%----------------------------------------------------------------------------
 
-prepare_plugins(EnabledFile, PluginsDistDir, ExpandDir) ->
+prepare_plugins(Enabled) ->
+    {ok, PluginsDistDir} = application:get_env(rabbit, plugins_dir),
+    {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
+
     AllPlugins = list(PluginsDistDir),
-    Enabled = read_enabled(EnabledFile),
-    ToUnpack = dependencies(false, Enabled, AllPlugins),
-    ToUnpackPlugins = lookup_plugins(ToUnpack, AllPlugins),
-
-    case Enabled -- plugin_names(ToUnpackPlugins) of
-        []      -> ok;
-        Missing -> error_logger:warning_msg(
-                     "The following enabled plugins were not found: ~p~n",
-                     [Missing])
-    end,
+    Wanted = dependencies(false, Enabled, AllPlugins),
+    WantedPlugins = lookup_plugins(Wanted, AllPlugins),
 
-    %% Eliminate the contents of the destination directory
-    case delete_recursively(ExpandDir) of
-        ok          -> ok;
-        {error, E1} -> throw({error, {cannot_delete_plugins_expand_dir,
-                                      [ExpandDir, E1]}})
-    end,
     case filelib:ensure_dir(ExpandDir ++ "/") of
         ok          -> ok;
         {error, E2} -> throw({error, {cannot_create_plugins_expand_dir,
                                       [ExpandDir, E2]}})
     end,
 
-    [prepare_plugin(Plugin, ExpandDir) || Plugin <- ToUnpackPlugins],
+    [prepare_plugin(Plugin, ExpandDir) || Plugin <- WantedPlugins],
 
     [prepare_dir_plugin(PluginAppDescPath) ||
-        PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")].
+        PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")],
+    Wanted.
+
+clean_plugins(Plugins) ->
+    {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
+    [clean_plugin(Plugin, ExpandDir) || Plugin <- Plugins].
+
+clean_plugin(Plugin, ExpandDir) ->
+    {ok, Mods} = application:get_key(Plugin, modules),
+    application:unload(Plugin),
+    [begin
+         code:soft_purge(Mod),
+         code:delete(Mod),
+         false = code:is_loaded(Mod)
+     end || Mod <- Mods],
+    delete_recursively(rabbit_misc:format("~s/~s", [ExpandDir, Plugin])).
 
 prepare_dir_plugin(PluginAppDescPath) ->
-    code:add_path(filename:dirname(PluginAppDescPath)),
-    list_to_atom(filename:basename(PluginAppDescPath, ".app")).
+    PluginEbinDir = filename:dirname(PluginAppDescPath),
+    Plugin = filename:basename(PluginAppDescPath, ".app"),
+    code:add_patha(PluginEbinDir),
+    case filelib:wildcard(PluginEbinDir++ "/*.beam") of
+        [] ->
+            ok;
+        [BeamPath | _] ->
+            Module = list_to_atom(filename:basename(BeamPath, ".beam")),
+            case code:ensure_loaded(Module) of
+                {module, _} ->
+                    ok;
+                {error, badfile} ->
+                    rabbit_log:error("Failed to enable plugin \"~s\": "
+                                     "it may have been built with an "
+                                     "incompatible (more recent?) "
+                                     "version of Erlang~n", [Plugin]),
+                    throw({plugin_built_with_incompatible_erlang, Plugin});
+                Error ->
+                    throw({plugin_module_unloadable, Plugin, Error})
+            end
+    end.
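%% [Editor's illustrative sketch, not part of this change: the
%% add-path-and-probe step above, reduced to its essentials. The module
%% name, the ebin directory and the plugin module are invented.]
-module(plugin_load_example).
-export([activate/2]).

%% EbinDir is an unpacked plugin's ebin directory; Module is any module
%% shipped in it. Prepending the path makes the plugin's code preferred
%% over same-named modules found later on the path; ensure_loaded/1 then
%% verifies the .beam files were built for a compatible Erlang release.
activate(EbinDir, Module) ->
    true = code:add_patha(EbinDir),
    case code:ensure_loaded(Module) of
        {module, Module} -> ok;
        {error, badfile} -> {error, incompatible_beam};
        {error, _} = Err -> Err
    end.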
 
 %%----------------------------------------------------------------------------
 
@@ -172,8 +264,7 @@ plugin_info(Base, {app, App0}) ->
 mkplugin(Name, Props, Type, Location) ->
     Version = proplists:get_value(vsn, Props, "0"),
     Description = proplists:get_value(description, Props, ""),
-    Dependencies =
-        filter_applications(proplists:get_value(applications, Props, [])),
+    Dependencies = proplists:get_value(applications, Props, []),
     #plugin{name = Name, version = Version, description = Description,
             dependencies = Dependencies, location = Location, type = Type}.
 
@@ -206,18 +297,6 @@ parse_binary(Bin) ->
         Err -> {error, {invalid_app, Err}}
     end.
 
-filter_applications(Applications) ->
-    [Application || Application <- Applications,
-                    not is_available_app(Application)].
-
-is_available_app(Application) ->
-    case application:load(Application) of
-        {error, {already_loaded, _}} -> true;
-        ok                           -> application:unload(Application),
-                                        true;
-        _                            -> false
-    end.
-
 plugin_names(Plugins) ->
     [Name || #plugin{name = Name} <- Plugins].
 
index 89e16f14b21ebbe6818051897e4e9b85d590622e..a4d5490c0959dce1948691c682c2bc43514cfa5b 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_plugins_main).
 -include("rabbit.hrl").
+-include("rabbit_cli.hrl").
 
--export([start/0, stop/0]).
+-export([start/0, stop/0, action/6]).
 
--define(VERBOSE_OPT, "-v").
--define(MINIMAL_OPT, "-m").
--define(ENABLED_OPT, "-E").
--define(ENABLED_ALL_OPT, "-e").
-
--define(VERBOSE_DEF, {?VERBOSE_OPT, flag}).
--define(MINIMAL_DEF, {?MINIMAL_OPT, flag}).
--define(ENABLED_DEF, {?ENABLED_OPT, flag}).
--define(ENABLED_ALL_DEF, {?ENABLED_ALL_OPT, flag}).
-
--define(GLOBAL_DEFS, []).
+-define(GLOBAL_DEFS(Node), [?NODE_DEF(Node)]).
 
 -define(COMMANDS,
         [{list, [?VERBOSE_DEF, ?MINIMAL_DEF, ?ENABLED_DEF, ?ENABLED_ALL_DEF]},
-         enable,
-         disable]).
+         {enable, [?OFFLINE_DEF, ?ONLINE_DEF]},
+         {disable, [?OFFLINE_DEF, ?ONLINE_DEF]},
+         {set, [?OFFLINE_DEF, ?ONLINE_DEF]},
+         {sync, []}]).
 
 %%----------------------------------------------------------------------------
 
 
 -spec(start/0 :: () -> no_return()).
 -spec(stop/0 :: () -> 'ok').
--spec(usage/0 :: () -> no_return()).
 
 -endif.
 
 %%----------------------------------------------------------------------------
 
-start() ->
-    {ok, [[PluginsFile|_]|_]} =
-        init:get_argument(enabled_plugins_file),
-    {ok, [[PluginsDir|_]|_]} = init:get_argument(plugins_dist_dir),
-    {Command, Opts, Args} =
-        case rabbit_misc:parse_arguments(?COMMANDS, ?GLOBAL_DEFS,
-                                         init:get_plain_arguments())
-        of
-            {ok, Res}  -> Res;
-            no_command -> print_error("could not recognise command", []),
-                          usage()
-        end,
+-record(cli, {file, dir, all, enabled, implicit}).
 
-    PrintInvalidCommandError =
-        fun () ->
-                print_error("invalid command '~s'",
-                            [string:join([atom_to_list(Command) | Args], " ")])
-        end,
 
-    case catch action(Command, Args, Opts, PluginsFile, PluginsDir) of
-        ok ->
-            rabbit_misc:quit(0);
-        {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} ->
-            PrintInvalidCommandError(),
-            usage();
-        {'EXIT', {function_clause, [{?MODULE, action, _, _} | _]}} ->
-            PrintInvalidCommandError(),
-            usage();
-        {error, Reason} ->
-            print_error("~p", [Reason]),
-            rabbit_misc:quit(2);
-        {error_string, Reason} ->
-            print_error("~s", [Reason]),
-            rabbit_misc:quit(2);
-        Other ->
-            print_error("~p", [Other]),
-            rabbit_misc:quit(2)
-    end.
+start() ->
+    {ok, [[PluginsFile|_]|_]} = init:get_argument(enabled_plugins_file),
+    {ok, [[PluginsDir |_]|_]} = init:get_argument(plugins_dist_dir),
+    rabbit_cli:main(
+      fun (Args, NodeStr) ->
+              parse_arguments(Args, NodeStr)
+      end,
+      fun (Command, Node, Args, Opts) ->
+              action(Command, Node, Args, Opts, PluginsFile, PluginsDir)
+      end, rabbit_plugins_usage).
 
 stop() ->
     ok.
 
 %%----------------------------------------------------------------------------
 
-action(list, [], Opts, PluginsFile, PluginsDir) ->
-    action(list, [".*"], Opts, PluginsFile, PluginsDir);
-action(list, [Pat], Opts, PluginsFile, PluginsDir) ->
-    format_plugins(Pat, Opts, PluginsFile, PluginsDir);
+parse_arguments(CmdLine, NodeStr) ->
+    rabbit_cli:parse_arguments(
+      ?COMMANDS, ?GLOBAL_DEFS(NodeStr), ?NODE_OPT, CmdLine).
 
-action(enable, ToEnable0, _Opts, PluginsFile, PluginsDir) ->
+action(Command, Node, Args, Opts, PluginsFile, PluginsDir) ->
+    All = rabbit_plugins:list(PluginsDir),
+    Enabled = rabbit_plugins:read_enabled(PluginsFile),
+    case Enabled -- plugin_names(All) of
+        []      -> ok;
+        Missing -> io:format("WARNING - plugins currently enabled but "
+                             "missing: ~p~n~n", [Missing])
+    end,
+    Implicit = rabbit_plugins:dependencies(false, Enabled, All),
+    State = #cli{file     = PluginsFile,
+                 dir      = PluginsDir,
+                 all      = All,
+                 enabled  = Enabled,
+                 implicit = Implicit},
+    action(Command, Node, Args, Opts, State).
+
+action(list, Node, [], Opts, State) ->
+    action(list, Node, [".*"], Opts, State);
+action(list, Node, [Pat], Opts, State) ->
+    format_plugins(Node, Pat, Opts, State);
+
+action(enable, Node, ToEnable0, Opts, State = #cli{all      = All,
+                                                   implicit = Implicit,
+                                                   enabled  = Enabled}) ->
     case ToEnable0 of
         [] -> throw({error_string, "Not enough arguments for 'enable'"});
         _  -> ok
     end,
-    AllPlugins = rabbit_plugins:list(PluginsDir),
-    Enabled = rabbit_plugins:read_enabled(PluginsFile),
-    ImplicitlyEnabled = rabbit_plugins:dependencies(false,
-                                                    Enabled, AllPlugins),
     ToEnable = [list_to_atom(Name) || Name <- ToEnable0],
-    Missing = ToEnable -- plugin_names(AllPlugins),
-    NewEnabled = lists:usort(Enabled ++ ToEnable),
-    NewImplicitlyEnabled = rabbit_plugins:dependencies(false,
-                                                       NewEnabled, AllPlugins),
-    MissingDeps = (NewImplicitlyEnabled -- plugin_names(AllPlugins)) -- Missing,
-    case {Missing, MissingDeps} of
-        {[],   []} -> ok;
-        {Miss, []} -> throw({error_string, fmt_missing("plugins",      Miss)});
-        {[], Miss} -> throw({error_string, fmt_missing("dependencies", Miss)});
-        {_,     _} -> throw({error_string,
-                             fmt_missing("plugins", Missing) ++
-                                 fmt_missing("dependencies", MissingDeps)})
+    Missing = ToEnable -- plugin_names(All),
+    case Missing of
+        [] -> ok;
+        _  -> throw({error_string, fmt_missing(Missing)})
     end,
-    write_enabled_plugins(PluginsFile, NewEnabled),
-    case NewEnabled -- ImplicitlyEnabled of
+    NewEnabled = lists:usort(Enabled ++ ToEnable),
+    NewImplicit = write_enabled_plugins(NewEnabled, State),
+    case NewEnabled -- Implicit of
         [] -> io:format("Plugin configuration unchanged.~n");
         _  -> print_list("The following plugins have been enabled:",
-                         NewImplicitlyEnabled -- ImplicitlyEnabled),
-              report_change()
-    end;
+                         NewImplicit -- Implicit)
+    end,
+    action_change(Opts, Node, Implicit, NewImplicit, State);
 
-action(disable, ToDisable0, _Opts, PluginsFile, PluginsDir) ->
+action(set, Node, NewEnabled0, Opts, State = #cli{all      = All,
+                                                  implicit = Implicit}) ->
+    NewEnabled = [list_to_atom(Name) || Name <- NewEnabled0],
+    Missing = NewEnabled -- plugin_names(All),
+    case Missing of
+        [] -> ok;
+        _  -> throw({error_string, fmt_missing(Missing)})
+    end,
+    NewImplicit = write_enabled_plugins(NewEnabled, State),
+    case NewImplicit of
+        [] -> io:format("All plugins are now disabled.~n");
+        _  -> print_list("The following plugins are now enabled:",
+                         NewImplicit)
+    end,
+    action_change(Opts, Node, Implicit, NewImplicit, State);
+
+action(disable, Node, ToDisable0, Opts, State = #cli{all      = All,
+                                                     implicit = Implicit,
+                                                     enabled  = Enabled}) ->
     case ToDisable0 of
         [] -> throw({error_string, "Not enough arguments for 'disable'"});
         _  -> ok
     end,
     ToDisable = [list_to_atom(Name) || Name <- ToDisable0],
-    Enabled = rabbit_plugins:read_enabled(PluginsFile),
-    AllPlugins = rabbit_plugins:list(PluginsDir),
-    Missing = ToDisable -- plugin_names(AllPlugins),
+    Missing = ToDisable -- plugin_names(All),
     case Missing of
         [] -> ok;
         _  -> print_list("Warning: the following plugins could not be found:",
                          Missing)
     end,
-    ToDisableDeps = rabbit_plugins:dependencies(true, ToDisable, AllPlugins),
+    ToDisableDeps = rabbit_plugins:dependencies(true, ToDisable, All),
     NewEnabled = Enabled -- ToDisableDeps,
+    NewImplicit = write_enabled_plugins(NewEnabled, State),
     case length(Enabled) =:= length(NewEnabled) of
         true  -> io:format("Plugin configuration unchanged.~n");
-        false -> ImplicitlyEnabled =
-                     rabbit_plugins:dependencies(false, Enabled, AllPlugins),
-                 NewImplicitlyEnabled =
-                     rabbit_plugins:dependencies(false,
-                                                 NewEnabled, AllPlugins),
-                 print_list("The following plugins have been disabled:",
-                            ImplicitlyEnabled -- NewImplicitlyEnabled),
-                 write_enabled_plugins(PluginsFile, NewEnabled),
-                 report_change()
-    end.
-
-%%----------------------------------------------------------------------------
+        false -> print_list("The following plugins have been disabled:",
+                            Implicit -- NewImplicit)
+    end,
+    action_change(Opts, Node, Implicit, NewImplicit, State);
 
-print_error(Format, Args) ->
-    rabbit_misc:format_stderr("Error: " ++ Format ++ "~n", Args).
+action(sync, Node, [], _Opts, State) ->
+    sync(Node, true, State).
 
-usage() ->
-    io:format("~s", [rabbit_plugins_usage:usage()]),
-    rabbit_misc:quit(1).
+%%----------------------------------------------------------------------------
 
 %% Pretty print a list of plugins.
-format_plugins(Pattern, Opts, PluginsFile, PluginsDir) ->
+format_plugins(Node, Pattern, Opts, #cli{all      = All,
+                                         enabled  = Enabled,
+                                         implicit = Implicit}) ->
     Verbose = proplists:get_bool(?VERBOSE_OPT, Opts),
     Minimal = proplists:get_bool(?MINIMAL_OPT, Opts),
     Format = case {Verbose, Minimal} of
@@ -180,43 +167,50 @@ format_plugins(Pattern, Opts, PluginsFile, PluginsDir) ->
     OnlyEnabled    = proplists:get_bool(?ENABLED_OPT,     Opts),
     OnlyEnabledAll = proplists:get_bool(?ENABLED_ALL_OPT, Opts),
 
-    AvailablePlugins = rabbit_plugins:list(PluginsDir),
-    EnabledExplicitly = rabbit_plugins:read_enabled(PluginsFile),
-    EnabledImplicitly =
-        rabbit_plugins:dependencies(false, EnabledExplicitly,
-                                    AvailablePlugins) -- EnabledExplicitly,
-    Missing = [#plugin{name = Name, dependencies = []} ||
-                  Name <- ((EnabledExplicitly ++ EnabledImplicitly) --
-                               plugin_names(AvailablePlugins))],
+    EnabledImplicitly = Implicit -- Enabled,
+    {StatusMsg, Running} =
+        case rabbit_cli:rpc_call(Node, rabbit_plugins, active, []) of
+            {badrpc, _} -> {"[failed to contact ~s - status not shown]", []};
+            Active      -> {"* = running on ~s", Active}
+        end,
     {ok, RE} = re:compile(Pattern),
     Plugins = [ Plugin ||
-                  Plugin = #plugin{name = Name} <- AvailablePlugins ++ Missing,
+                  Plugin = #plugin{name = Name} <- All,
                   re:run(atom_to_list(Name), RE, [{capture, none}]) =:= match,
-                  if OnlyEnabled    ->  lists:member(Name, EnabledExplicitly);
-                     OnlyEnabledAll -> (lists:member(Name,
-                                                     EnabledExplicitly) or
-                                        lists:member(Name, EnabledImplicitly));
+                  if OnlyEnabled    -> lists:member(Name, Enabled);
+                     OnlyEnabledAll -> lists:member(Name, Enabled) or
+                                           lists:member(Name,EnabledImplicitly);
                      true           -> true
                   end],
     Plugins1 = usort_plugins(Plugins),
     MaxWidth = lists:max([length(atom_to_list(Name)) ||
                              #plugin{name = Name} <- Plugins1] ++ [0]),
-    [format_plugin(P, EnabledExplicitly, EnabledImplicitly,
-                   plugin_names(Missing), Format, MaxWidth) || P <- Plugins1],
+    case Format of
+        minimal -> ok;
+        _       -> io:format(" Configured: E = explicitly enabled; "
+                             "e = implicitly enabled~n"
+                             " | Status:   ~s~n"
+                             " |/~n", [rabbit_misc:format(StatusMsg, [Node])])
+    end,
+    [format_plugin(P, Enabled, EnabledImplicitly, Running,
+                   Format, MaxWidth) || P <- Plugins1],
     ok.
 
 format_plugin(#plugin{name = Name, version = Version,
                       description = Description, dependencies = Deps},
-              EnabledExplicitly, EnabledImplicitly, Missing,
-              Format, MaxWidth) ->
-    Glyph = case {lists:member(Name, EnabledExplicitly),
-                  lists:member(Name, EnabledImplicitly),
-                  lists:member(Name, Missing)} of
-                {true, false, false} -> "[E]";
-                {false, true, false} -> "[e]";
-                {_,        _,  true} -> "[!]";
-                _                    -> "[ ]"
-            end,
+              Enabled, EnabledImplicitly, Running, Format,
+              MaxWidth) ->
+    EnabledGlyph = case {lists:member(Name, Enabled),
+                         lists:member(Name, EnabledImplicitly)} of
+                       {true, false} -> "E";
+                       {false, true} -> "e";
+                       _             -> " "
+                   end,
+    RunningGlyph = case lists:member(Name, Running) of
+                       true  -> "*";
+                       false -> " "
+                   end,
+    Glyph = rabbit_misc:format("[~s~s]", [EnabledGlyph, RunningGlyph]),
     Opt = fun (_F, A, A) -> ok;
               ( F, A, _) -> io:format(F, [A])
           end,
@@ -227,9 +221,9 @@ format_plugin(#plugin{name = Name, version = Version,
                    Opt("~s", Version, undefined),
                    io:format("~n");
         verbose -> io:format("~s ~w~n", [Glyph, Name]),
-                   Opt("    Version:     \t~s~n", Version,     undefined),
-                   Opt("    Dependencies:\t~p~n", Deps,        []),
-                   Opt("    Description: \t~s~n", Description, undefined),
+                   Opt("     Version:     \t~s~n", Version,     undefined),
+                   Opt("     Dependencies:\t~p~n", Deps,        []),
+                   Opt("     Description: \t~s~n", Description, undefined),
                    io:format("~n")
     end.
 
@@ -240,8 +234,8 @@ fmt_list(Header, Plugins) ->
     lists:flatten(
       [Header, $\n, [io_lib:format("  ~s~n", [P]) || P <- Plugins]]).
 
-fmt_missing(Desc, Missing) ->
-    fmt_list("The following " ++ Desc ++ " could not be found:", Missing).
+fmt_missing(Missing) ->
+    fmt_list("The following plugins could not be found:", Missing).
 
 usort_plugins(Plugins) ->
     lists:usort(fun plugins_cmp/2, Plugins).
@@ -255,13 +249,59 @@ plugin_names(Plugins) ->
     [Name || #plugin{name = Name} <- Plugins].
 
 %% Write the enabled plugin names on disk.
-write_enabled_plugins(PluginsFile, Plugins) ->
-    case rabbit_file:write_term_file(PluginsFile, [Plugins]) of
-        ok              -> ok;
+write_enabled_plugins(Plugins, #cli{file = File,
+                                    all  = All}) ->
+    case rabbit_file:write_term_file(File, [Plugins]) of
+        ok              -> rabbit_plugins:dependencies(false, Plugins, All);
         {error, Reason} -> throw({error, {cannot_write_enabled_plugins_file,
-                                          PluginsFile, Reason}})
+                                          File, Reason}})
+    end.
+
+action_change(Opts, Node, Old, New, State) ->
+    action_change0(proplists:get_bool(?OFFLINE_OPT, Opts),
+                   proplists:get_bool(?ONLINE_OPT, Opts),
+                   Node, Old, New, State).
+
+action_change0(true, _Online, _Node, Same, Same, _State) ->
+    %% Definitely nothing to do
+    ok;
+action_change0(true, _Online, _Node, _Old, _New, _State) ->
+    io:format("Offline change; changes will take effect at broker restart.~n");
+action_change0(false, Online, Node, _Old, _New, State) ->
+    sync(Node, Online, State).
+
+sync(Node, ForceOnline, #cli{file = File}) ->
+    rpc_call(Node, ForceOnline, rabbit_plugins, ensure, [File]).
+
+rpc_call(Node, Online, Mod, Fun, Args) ->
+    io:format("~nApplying plugin configuration to ~s...", [Node]),
+    case rabbit_cli:rpc_call(Node, Mod, Fun, Args) of
+        {ok, [], []} ->
+            io:format(" nothing to do.~n", []);
+        {ok, Start, []} ->
+            io:format(" started ~b plugin~s.~n", [length(Start), plur(Start)]);
+        {ok, [], Stop} ->
+            io:format(" stopped ~b plugin~s.~n", [length(Stop), plur(Stop)]);
+        {ok, Start, Stop} ->
+            io:format(" stopped ~b plugin~s and started ~b plugin~s.~n",
+                      [length(Stop), plur(Stop), length(Start), plur(Start)]);
+        {badrpc, nodedown} = Error ->
+            io:format(" failed.~n", []),
+            case Online of
+                true  -> Error;
+                false -> io:format(
+                           " * Could not contact node ~s.~n"
+                           "   Changes will take effect at broker restart.~n"
+                           " * Options: --online  - fail if broker cannot be "
+                           "contacted.~n"
+                           "            --offline - do not try to contact "
+                           "broker.~n",
+                           [Node])
+            end;
+        Error ->
+            io:format(" failed.~n", []),
+            Error
     end.
 
-report_change() ->
-    io:format("Plugin configuration has changed. "
-              "Restart RabbitMQ for changes to take effect.~n").
+plur([_]) -> "";
+plur(_)   -> "s".
index fe2b766f30661bc01c07277d3e3dd205dabaf4b5..65f3801e3e668c7e39e9956541a79f0fd91482a5 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_policies).
@@ -34,7 +34,8 @@ register() ->
                           {policy_validator, <<"dead-letter-routing-key">>},
                           {policy_validator, <<"message-ttl">>},
                           {policy_validator, <<"expires">>},
-                          {policy_validator, <<"max-length">>}]],
+                          {policy_validator, <<"max-length">>},
+                          {policy_validator, <<"max-length-bytes">>}]],
     ok.
 
 validate_policy(Terms) ->
@@ -61,13 +62,13 @@ validate_policy0(<<"dead-letter-routing-key">>, Value) ->
     {error, "~p is not a valid dead letter routing key", [Value]};
 
 validate_policy0(<<"message-ttl">>, Value)
-  when is_integer(Value), Value >= 0, Value =< ?MAX_EXPIRY_TIMER ->
+  when is_integer(Value), Value >= 0 ->
     ok;
 validate_policy0(<<"message-ttl">>, Value) ->
     {error, "~p is not a valid message TTL", [Value]};
 
 validate_policy0(<<"expires">>, Value)
-  when is_integer(Value), Value >= 1, Value =< ?MAX_EXPIRY_TIMER ->
+  when is_integer(Value), Value >= 1 ->
     ok;
 validate_policy0(<<"expires">>, Value) ->
     {error, "~p is not a valid queue expiry", [Value]};
@@ -76,6 +77,10 @@ validate_policy0(<<"max-length">>, Value)
   when is_integer(Value), Value >= 0 ->
     ok;
 validate_policy0(<<"max-length">>, Value) ->
-    {error, "~p is not a valid maximum length", [Value]}.
-
+    {error, "~p is not a valid maximum length", [Value]};
 
+validate_policy0(<<"max-length-bytes">>, Value)
+  when is_integer(Value), Value >= 0 ->
+    ok;
+validate_policy0(<<"max-length-bytes">>, Value) ->
+    {error, "~p is not a valid maximum length in bytes", [Value]}.
index 0a69fb325b99c651e4b9eba4b1a326c666e9df66..5bf5483272432e7930c56f54440dd4a191d48bbe 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_policy).
@@ -46,17 +46,11 @@ name(#exchange{policy = Policy}) -> name0(Policy).
 name0(undefined) -> none;
 name0(Policy)    -> pget(name, Policy).
 
-set(Q = #amqqueue{name = Name}) -> rabbit_queue_decorator:set(
-                                     Q#amqqueue{policy = set0(Name)});
-set(X = #exchange{name = Name}) -> rabbit_exchange_decorator:set(
-                                     X#exchange{policy = set0(Name)}).
+set(Q = #amqqueue{name = Name}) -> Q#amqqueue{policy = set0(Name)};
+set(X = #exchange{name = Name}) -> X#exchange{policy = set0(Name)}.
 
 set0(Name = #resource{virtual_host = VHost}) -> match(Name, list(VHost)).
 
-set(Q = #amqqueue{name = Name}, Ps) -> Q#amqqueue{policy = match(Name, Ps)};
-set(X = #exchange{name = Name}, Ps) -> rabbit_exchange_decorator:set(
-                                         X#exchange{policy = match(Name, Ps)}).
-
 get(Name, #amqqueue{policy = Policy}) -> get0(Name, Policy);
 get(Name, #exchange{policy = Policy}) -> get0(Name, Policy);
 %% Caution - SLOW.
@@ -104,12 +98,18 @@ recover0() ->
     Policies = list(),
     [rabbit_misc:execute_mnesia_transaction(
        fun () ->
-               mnesia:write(rabbit_durable_exchange, set(X, Policies), write)
-       end) || X <- Xs],
+               mnesia:write(
+                 rabbit_durable_exchange,
+                 rabbit_exchange_decorator:set(
+                   X#exchange{policy = match(Name, Policies)}), write)
+       end) || X = #exchange{name = Name} <- Xs],
     [rabbit_misc:execute_mnesia_transaction(
        fun () ->
-               mnesia:write(rabbit_durable_queue, set(Q, Policies), write)
-       end) || Q <- Qs],
+               mnesia:write(
+                 rabbit_durable_queue,
+                 rabbit_queue_decorator:set(
+                   Q#amqqueue{policy = match(Name, Policies)}), write)
+       end) || Q = #amqqueue{name = Name} <- Qs],
     ok.
 
 invalid_file() ->
index dd0520891cd6a54247ed1abab8b5115cfc62e946..7ebea83516bfed3a75931aaa62e0d4bb292e4e55 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_policy_validator).
index 6a6a4ee680c3ee645bde460842882cfc38bebb3f..13455abb0a8831bdbef0905207db4e3c7c7cb5ee 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_prelaunch).
@@ -22,9 +22,9 @@
 
 -include("rabbit.hrl").
 
--define(DIST_PORT_NOT_CONFIGURED, 0).
+-define(SET_DIST_PORT, 0).
 -define(ERROR_CODE, 1).
--define(DIST_PORT_CONFIGURED, 2).
+-define(DO_NOT_SET_DIST_PORT, 2).
 
 %%----------------------------------------------------------------------------
 %% Specs
@@ -46,13 +46,14 @@ start() ->
             {NodeName, NodeHost} = rabbit_nodes:parts(Node),
             ok = duplicate_node_check(NodeName, NodeHost),
             ok = dist_port_set_check(),
+            ok = dist_port_range_check(),
             ok = dist_port_use_check(NodeHost);
         [] ->
             %% Ignore running node while installing windows service
             ok = dist_port_set_check(),
             ok
     end,
-    rabbit_misc:quit(?DIST_PORT_NOT_CONFIGURED),
+    rabbit_misc:quit(?SET_DIST_PORT),
     ok.
 
 stop() ->
@@ -88,7 +89,7 @@ dist_port_set_check() ->
                     case {pget(inet_dist_listen_min, Kernel, none),
                           pget(inet_dist_listen_max, Kernel, none)} of
                         {none, none} -> ok;
-                        _            -> rabbit_misc:quit(?DIST_PORT_CONFIGURED)
+                        _            -> rabbit_misc:quit(?DO_NOT_SET_DIST_PORT)
                     end;
                 {ok, _} ->
                     ok;
@@ -97,6 +98,17 @@ dist_port_set_check() ->
             end
     end.
 
+dist_port_range_check() ->
+    case os:getenv("RABBITMQ_DIST_PORT") of
+        false   -> ok;
+        PortStr -> case catch list_to_integer(PortStr) of
+                       Port when is_integer(Port) andalso Port > 65535 ->
+                           rabbit_misc:quit(?DO_NOT_SET_DIST_PORT);
+                       _ ->
+                           ok
+                   end
+    end.
+
 dist_port_use_check(NodeHost) ->
     case os:getenv("RABBITMQ_DIST_PORT") of
         false   -> ok;
diff --git a/rabbitmq-server/src/rabbit_prequeue.erl b/rabbitmq-server/src/rabbit_prequeue.erl
new file mode 100644 (file)
index 0000000..af96ea9
--- /dev/null
@@ -0,0 +1,104 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_prequeue).
+
+%% This is the initial gen_server that all queue processes start off
+%% as. It handles the decision as to whether we need to start a new
+%% slave, a new master/unmirrored, or whether we are restarting (and
+%% if so, as what). Thus a crashing queue process can restart from here
+%% and always do the right thing.
+
+-export([start_link/3]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3]).
+
+-behaviour(gen_server2).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-ifdef(use_specs).
+
+-export_type([start_mode/0]).
+
+-type(start_mode() :: 'declare' | 'recovery' | 'slave').
+
+-spec(start_link/3 :: (rabbit_types:amqqueue(), start_mode(), pid())
+                      -> rabbit_types:ok_pid_or_error()).
+
+-endif.
+
+%%----------------------------------------------------------------------------
+
+start_link(Q, StartMode, Marker) ->
+    gen_server2:start_link(?MODULE, {Q, StartMode, Marker}, []).
+
+%%----------------------------------------------------------------------------
+
+init({Q, StartMode, Marker}) ->
+    init(Q, case {is_process_alive(Marker), StartMode} of
+                {true,  slave} -> slave;
+                {true,  _}     -> master;
+                {false, _}     -> restart
+            end).
+
+init(Q, master) -> rabbit_amqqueue_process:init(Q);
+init(Q, slave)  -> rabbit_mirror_queue_slave:init(Q);
+
+init(#amqqueue{name = QueueName}, restart) ->
+    {ok, Q = #amqqueue{pid        = QPid,
+                       slave_pids = SPids}} = rabbit_amqqueue:lookup(QueueName),
+    LocalOrMasterDown = node(QPid) =:= node()
+        orelse not rabbit_mnesia:on_running_node(QPid),
+    Slaves = [SPid || SPid <- SPids, rabbit_mnesia:is_process_alive(SPid)],
+    case rabbit_mnesia:is_process_alive(QPid) of
+        true  -> false = LocalOrMasterDown, %% assertion
+                 rabbit_mirror_queue_slave:go(self(), async),
+                 rabbit_mirror_queue_slave:init(Q); %% [1]
+        false -> case LocalOrMasterDown andalso Slaves =:= [] of
+                     true  -> crash_restart(Q);     %% [2]
+                     false -> timer:sleep(25),
+                              init(Q, restart)      %% [3]
+                 end
+    end.
+%% [1] There is a master on another node. Regardless of whether we
+%%     were originally a master or a slave, we are now a new slave.
+%%
+%% [2] Nothing is alive. We are the last best hope. Try to restart as a master.
+%%
+%% [3] The current master is dead but either there are alive slaves to
+%%     take over or it's all happening on a different node anyway. This is
+%%     not a stable situation. Sleep and wait for somebody else to make a
+%%     move.
+
+crash_restart(Q = #amqqueue{name = QueueName}) ->
+    rabbit_log:error("Restarting crashed ~s.~n", [rabbit_misc:rs(QueueName)]),
+    gen_server2:cast(self(), init),
+    rabbit_amqqueue_process:init(Q#amqqueue{pid = self()}).
+
+%%----------------------------------------------------------------------------
+
+%% This gen_server2 always hands over to some other module at the end
+%% of init/1.
+handle_call(_Msg, _From, _State)     -> exit(unreachable).
+handle_cast(_Msg, _State)            -> exit(unreachable).
+handle_info(_Msg, _State)            -> exit(unreachable).
+terminate(_Reason, _State)           -> exit(unreachable).
+code_change(_OldVsn, _State, _Extra) -> exit(unreachable).
+
diff --git a/rabbitmq-server/src/rabbit_priority_queue.erl b/rabbitmq-server/src/rabbit_priority_queue.erl
new file mode 100644 (file)
index 0000000..206d674
--- /dev/null
@@ -0,0 +1,584 @@
+%%  The contents of this file are subject to the Mozilla Public License
+%%  Version 1.1 (the "License"); you may not use this file except in
+%%  compliance with the License. You may obtain a copy of the License
+%%  at http://www.mozilla.org/MPL/
+%%
+%%  Software distributed under the License is distributed on an "AS IS"
+%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%%  the License for the specific language governing rights and
+%%  limitations under the License.
+%%
+%%  The Original Code is RabbitMQ.
+%%
+%%  The Initial Developer of the Original Code is GoPivotal, Inc.
+%%  Copyright (c) 2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_priority_queue).
+
+-include_lib("rabbit.hrl").
+-include_lib("rabbit_framing.hrl").
+-behaviour(rabbit_backing_queue).
+
+%% Priority queue support is enabled unconditionally. Disabling priority
+%% queueing after it has been enabled is dangerous.
+-rabbit_boot_step({?MODULE,
+                   [{description, "enable priority queue"},
+                    {mfa,         {?MODULE, enable, []}},
+                    {requires,    pre_boot},
+                    {enables,     kernel_ready}]}).
+
+-export([enable/0]).
+
+-export([start/1, stop/0]).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+         purge/1, purge_acks/1,
+         publish/6, publish_delivered/5, discard/4, drain_confirmed/1,
+         dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+         ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+         set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+         handle_pre_hibernate/1, resume/1, msg_rates/1,
+         info/2, invoke/3, is_duplicate/2]).
+
+-record(state, {bq, bqss}).
+-record(passthrough, {bq, bqs}).
+
+%% See 'note on suffixes' below
+-define(passthrough1(F), State#passthrough{bqs = BQ:F}).
+-define(passthrough2(F),
+        {Res, BQS1} = BQ:F, {Res, State#passthrough{bqs = BQS1}}).
+-define(passthrough3(F),
+        {Res1, Res2, BQS1} = BQ:F, {Res1, Res2, State#passthrough{bqs = BQS1}}).
+
+%% This module adds support for priority queues.
+%%
+%% Priority queues have one backing queue per priority. Backing queue
+%% functions are applied to each per-priority BQ in turn and their
+%% results are folded together, keeping the sub-queues sorted by
+%% priority.
+%%
+%% For queues that do not have priorities enabled, the functions in
+%% this module delegate to their "regular" backing queue module
+%% counterparts. See the `passthrough` record and the
+%% passthrough{1,2,3} macros.
+%%
+%% Delivery to consumers happens by first "running" the queue with
+%% the highest priority until there are no more messages to deliver,
+%% then the next one, and so on. This offers good prioritisation
+%% but may result in lower priority messages not being delivered
+%% when there's a high ingress rate of messages with higher priority.
+
+enable() ->
+    {ok, RealBQ} = application:get_env(rabbit, backing_queue_module),
+    case RealBQ of
+        ?MODULE -> ok;
+        _       -> rabbit_log:info("Priority queues enabled, real BQ is ~s~n",
+                                   [RealBQ]),
+                   application:set_env(
+                     rabbitmq_priority_queue, backing_queue_module, RealBQ),
+                   application:set_env(rabbit, backing_queue_module, ?MODULE)
+    end.
+
+%%----------------------------------------------------------------------------
+
+start(QNames) ->
+    BQ = bq(),
+    %% TODO this expand-collapse dance is a bit ridiculous but it's what
+    %% rabbit_amqqueue:recover/0 expects. We could probably simplify
+    %% this if we rejigged recovery a bit.
+    {DupNames, ExpNames} = expand_queues(QNames),
+    case BQ:start(ExpNames) of
+        {ok, ExpRecovery} ->
+            {ok, collapse_recovery(QNames, DupNames, ExpRecovery)};
+        Else ->
+            Else
+    end.
+
+stop() ->
+    BQ = bq(),
+    BQ:stop().
+
+%%----------------------------------------------------------------------------
+
+mutate_name(P, Q = #amqqueue{name = QName = #resource{name = QNameBin}}) ->
+    Q#amqqueue{name = QName#resource{name = mutate_name_bin(P, QNameBin)}}.
+
+mutate_name_bin(P, NameBin) -> <<NameBin/binary, 0, P:8>>.
+
+expand_queues(QNames) ->
+    lists:unzip(
+      lists:append([expand_queue(QName) || QName <- QNames])).
+
+expand_queue(QName = #resource{name = QNameBin}) ->
+    {ok, Q} = rabbit_misc:dirty_read({rabbit_durable_queue, QName}),
+    case priorities(Q) of
+        none -> [{QName, QName}];
+        Ps   -> [{QName, QName#resource{name = mutate_name_bin(P, QNameBin)}}
+                   || P <- Ps]
+    end.
+
+collapse_recovery(QNames, DupNames, Recovery) ->
+    NameToTerms = lists:foldl(fun({Name, RecTerm}, Dict) ->
+                                      dict:append(Name, RecTerm, Dict)
+                              end, dict:new(), lists:zip(DupNames, Recovery)),
+    [dict:fetch(Name, NameToTerms) || Name <- QNames].
+
+priorities(#amqqueue{arguments = Args}) ->
+    Ints = [long, short, signedint, byte],
+    case rabbit_misc:table_lookup(Args, <<"x-max-priority">>) of
+        {Type, Max} -> case lists:member(Type, Ints) of
+                           false -> none;
+                           true  -> lists:reverse(lists:seq(0, Max))
+                       end;
+        _           -> none
+    end.
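%% [Editor's illustrative sketch, not part of this change: declaring a
%% queue whose priorities/1 result above is [3,2,1,0], using the Erlang
%% amqp_client library. The module name, queue name and channel handling
%% are examples only.]
-module(priority_queue_declare_example).
-include_lib("amqp_client/include/amqp_client.hrl").
-export([declare/1]).

%% Channel is an open amqp_channel pid.
declare(Channel) ->
    Declare = #'queue.declare'{
                 queue     = <<"prio.example">>,
                 durable   = true,
                 arguments = [{<<"x-max-priority">>, long, 3}]},
    #'queue.declare_ok'{} = amqp_channel:call(Channel, Declare),
    ok.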
+
+%%----------------------------------------------------------------------------
+
+init(Q, Recover, AsyncCallback) ->
+    BQ = bq(),
+    case priorities(Q) of
+        none -> RealRecover = case Recover of
+                                  [R] -> R; %% [0]
+                                  R   -> R
+                              end,
+                #passthrough{bq  = BQ,
+                             bqs = BQ:init(Q, RealRecover, AsyncCallback)};
+        Ps   -> Init = fun (P, Term) ->
+                               BQ:init(
+                                 mutate_name(P, Q), Term,
+                                 fun (M, F) -> AsyncCallback(M, {P, F}) end)
+                       end,
+                BQSs = case have_recovery_terms(Recover) of
+                           false -> [{P, Init(P, Recover)} || P <- Ps];
+                           _     -> PsTerms = lists:zip(Ps, Recover),
+                                    [{P, Init(P, Term)} || {P, Term} <- PsTerms]
+                       end,
+                #state{bq   = BQ,
+                       bqss = BQSs}
+    end.
+%% [0] collapse_recovery has the effect of making a list of recovery
+%% terms in priority order, even for non-priority queues. It's easier
+%% to do that and "unwrap" in init/3 than to have collapse_recovery be
+%% aware of non-priority queues.
+
+have_recovery_terms(new)                -> false;
+have_recovery_terms(non_clean_shutdown) -> false;
+have_recovery_terms(_)                  -> true.
+
+terminate(Reason, State = #state{bq = BQ}) ->
+    foreach1(fun (_P, BQSN) -> BQ:terminate(Reason, BQSN) end, State);
+terminate(Reason, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(terminate(Reason, BQS)).
+
+delete_and_terminate(Reason, State = #state{bq = BQ}) ->
+    foreach1(fun (_P, BQSN) ->
+                     BQ:delete_and_terminate(Reason, BQSN)
+             end, State);
+delete_and_terminate(Reason, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(delete_and_terminate(Reason, BQS)).
+
+delete_crashed(Q) ->
+    BQ = bq(),
+    case priorities(Q) of
+        none -> BQ:delete_crashed(Q);
+        Ps   -> [BQ:delete_crashed(mutate_name(P, Q)) || P <- Ps]
+    end.
+
+purge(State = #state{bq = BQ}) ->
+    fold_add2(fun (_P, BQSN) -> BQ:purge(BQSN) end, State);
+purge(State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(purge(BQS)).
+
+purge_acks(State = #state{bq = BQ}) ->
+    foreach1(fun (_P, BQSN) -> BQ:purge_acks(BQSN) end, State);
+purge_acks(State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(purge_acks(BQS)).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State = #state{bq = BQ}) ->
+    pick1(fun (_P, BQSN) ->
+                  BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQSN)
+          end, Msg, State);
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
+        State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State = #state{bq = BQ}) ->
+    pick2(fun (P, BQSN) ->
+                  {AckTag, BQSN1} = BQ:publish_delivered(
+                                      Msg, MsgProps, ChPid, Flow, BQSN),
+                  {{P, AckTag}, BQSN1}
+          end, Msg, State);
+publish_delivered(Msg, MsgProps, ChPid, Flow,
+                  State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)).
+
+%% TODO this is a hack. The BQ api does not give us enough information
+%% here - if we had the Msg we could look at its priority and forward
+%% to the appropriate sub-BQ. But we don't so we are stuck.
+%%
+%% But fortunately VQ ignores discard/4, so we can too, *assuming we
+%% are talking to VQ*. discard/4 is used by HA, but that's "above" us
+%% (if in use) so we don't break that either, just some hypothetical
+%% alternate BQ implementation.
+discard(_MsgId, _ChPid, _Flow, State = #state{}) ->
+    State;
+    %% We should have something a bit like this here:
+    %% pick1(fun (_P, BQSN) ->
+    %%               BQ:discard(MsgId, ChPid, Flow, BQSN)
+    %%       end, Msg, State);
+discard(MsgId, ChPid, Flow, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(discard(MsgId, ChPid, Flow, BQS)).
+
+drain_confirmed(State = #state{bq = BQ}) ->
+    fold_append2(fun (_P, BQSN) -> BQ:drain_confirmed(BQSN) end, State);
+drain_confirmed(State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(drain_confirmed(BQS)).
+
+dropwhile(Pred, State = #state{bq = BQ}) ->
+    find2(fun (_P, BQSN) -> BQ:dropwhile(Pred, BQSN) end, undefined, State);
+dropwhile(Pred, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(dropwhile(Pred, BQS)).
+
+%% TODO this is a bit nasty. In the one place where fetchwhile/4 is
+%% actually used, the accumulator is a list of acktags, which of course
+%% we need to mutate - so we do that although we are encoding an
+%% assumption here.
+fetchwhile(Pred, Fun, Acc, State = #state{bq = BQ}) ->
+    findfold3(
+      fun (P, BQSN, AccN) ->
+              {Res, AccN1, BQSN1} = BQ:fetchwhile(Pred, Fun, AccN, BQSN),
+              {Res, priority_on_acktags(P, AccN1), BQSN1}
+      end, Acc, undefined, State);
+fetchwhile(Pred, Fun, Acc, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough3(fetchwhile(Pred, Fun, Acc, BQS)).
+
+fetch(AckRequired, State = #state{bq = BQ}) ->
+    find2(
+      fun (P, BQSN) ->
+              case BQ:fetch(AckRequired, BQSN) of
+                  {empty,            BQSN1} -> {empty, BQSN1};
+                  {{Msg, Del, ATag}, BQSN1} -> {{Msg, Del, {P, ATag}}, BQSN1}
+              end
+      end, empty, State);
+fetch(AckRequired, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(fetch(AckRequired, BQS)).
+
+drop(AckRequired, State = #state{bq = BQ}) ->
+    find2(fun (P, BQSN) ->
+                  case BQ:drop(AckRequired, BQSN) of
+                      {empty,           BQSN1} -> {empty, BQSN1};
+                      {{MsgId, AckTag}, BQSN1} -> {{MsgId, {P, AckTag}}, BQSN1}
+                  end
+          end, empty, State);
+drop(AckRequired, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(drop(AckRequired, BQS)).
+
+ack(AckTags, State = #state{bq = BQ}) ->
+    fold_by_acktags2(fun (AckTagsN, BQSN) ->
+                             BQ:ack(AckTagsN, BQSN)
+                     end, AckTags, State);
+ack(AckTags, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(ack(AckTags, BQS)).
+
+requeue(AckTags, State = #state{bq = BQ}) ->
+    fold_by_acktags2(fun (AckTagsN, BQSN) ->
+                             BQ:requeue(AckTagsN, BQSN)
+                     end, AckTags, State);
+requeue(AckTags, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(requeue(AckTags, BQS)).
+
+%% Similar problem to fetchwhile/4
+ackfold(MsgFun, Acc, State = #state{bq = BQ}, AckTags) ->
+    AckTagsByPriority = partition_acktags(AckTags),
+    fold2(
+      fun (P, BQSN, AccN) ->
+              case orddict:find(P, AckTagsByPriority) of
+                  {ok, ATagsN} -> {AccN1, BQSN1} =
+                                      BQ:ackfold(MsgFun, AccN, BQSN, ATagsN),
+                                  {priority_on_acktags(P, AccN1), BQSN1};
+                  error        -> {AccN, BQSN}
+              end
+      end, Acc, State);
+ackfold(MsgFun, Acc, State = #passthrough{bq = BQ, bqs = BQS}, AckTags) ->
+    ?passthrough2(ackfold(MsgFun, Acc, BQS, AckTags)).
+
+fold(Fun, Acc, State = #state{bq = BQ}) ->
+    fold2(fun (_P, BQSN, AccN) -> BQ:fold(Fun, AccN, BQSN) end, Acc, State);
+fold(Fun, Acc, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(fold(Fun, Acc, BQS)).
+
+len(#state{bq = BQ, bqss = BQSs}) ->
+    add0(fun (_P, BQSN) -> BQ:len(BQSN) end, BQSs);
+len(#passthrough{bq = BQ, bqs = BQS}) ->
+    BQ:len(BQS).
+
+is_empty(#state{bq = BQ, bqss = BQSs}) ->
+    all0(fun (_P, BQSN) -> BQ:is_empty(BQSN) end, BQSs);
+is_empty(#passthrough{bq = BQ, bqs = BQS}) ->
+    BQ:is_empty(BQS).
+
+depth(#state{bq = BQ, bqss = BQSs}) ->
+    add0(fun (_P, BQSN) -> BQ:depth(BQSN) end, BQSs);
+depth(#passthrough{bq = BQ, bqs = BQS}) ->
+    BQ:depth(BQS).
+
+set_ram_duration_target(DurationTarget, State = #state{bq = BQ}) ->
+    foreach1(fun (_P, BQSN) ->
+                     BQ:set_ram_duration_target(DurationTarget, BQSN)
+             end, State);
+set_ram_duration_target(DurationTarget,
+                        State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(set_ram_duration_target(DurationTarget, BQS)).
+
+ram_duration(State = #state{bq = BQ}) ->
+    fold_min2(fun (_P, BQSN) -> BQ:ram_duration(BQSN) end, State);
+ram_duration(State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(ram_duration(BQS)).
+
+needs_timeout(#state{bq = BQ, bqss = BQSs}) ->
+    fold0(fun (_P, _BQSN, timed) -> timed;
+              (_P, BQSN,  idle)  -> case BQ:needs_timeout(BQSN) of
+                                        timed -> timed;
+                                        _     -> idle
+                                    end;
+              (_P, BQSN,  false) -> BQ:needs_timeout(BQSN)
+          end, false, BQSs);
+needs_timeout(#passthrough{bq = BQ, bqs = BQS}) ->
+    BQ:needs_timeout(BQS).
+
+timeout(State = #state{bq = BQ}) ->
+    foreach1(fun (_P, BQSN) -> BQ:timeout(BQSN) end, State);
+timeout(State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(timeout(BQS)).
+
+handle_pre_hibernate(State = #state{bq = BQ}) ->
+    foreach1(fun (_P, BQSN) ->
+                     BQ:handle_pre_hibernate(BQSN)
+             end, State);
+handle_pre_hibernate(State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(handle_pre_hibernate(BQS)).
+
+resume(State = #state{bq = BQ}) ->
+    foreach1(fun (_P, BQSN) -> BQ:resume(BQSN) end, State);
+resume(State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(resume(BQS)).
+
+msg_rates(#state{bq = BQ, bqss = BQSs}) ->
+    fold0(fun(_P, BQSN, {InN, OutN}) ->
+                  {In, Out} = BQ:msg_rates(BQSN),
+                  {InN + In, OutN + Out}
+          end, {0.0, 0.0}, BQSs);
+msg_rates(#passthrough{bq = BQ, bqs = BQS}) ->
+    BQ:msg_rates(BQS).
+
+info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) ->
+    fold0(fun (P, BQSN, Acc) ->
+                  combine_status(P, BQ:info(backing_queue_status, BQSN), Acc)
+          end, nothing, BQSs);
+info(Item, #state{bq = BQ, bqss = BQSs}) ->
+    fold0(fun (_P, BQSN, Acc) ->
+                  Acc + BQ:info(Item, BQSN)
+          end, 0, BQSs);
+info(Item, #passthrough{bq = BQ, bqs = BQS}) ->
+    BQ:info(Item, BQS).
+
+invoke(Mod, {P, Fun}, State = #state{bq = BQ}) ->
+    pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State);
+invoke(Mod, Fun, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough1(invoke(Mod, Fun, BQS)).
+
+is_duplicate(Msg, State = #state{bq = BQ}) ->
+    pick2(fun (_P, BQSN) -> BQ:is_duplicate(Msg, BQSN) end, Msg, State);
+is_duplicate(Msg, State = #passthrough{bq = BQ, bqs = BQS}) ->
+    ?passthrough2(is_duplicate(Msg, BQS)).
+
+%%----------------------------------------------------------------------------
+
+bq() ->
+    {ok, RealBQ} = application:get_env(
+                     rabbitmq_priority_queue, backing_queue_module),
+    RealBQ.
+
+%% Note on suffixes: Many utility functions here have suffixes telling
+%% you the arity of the return type of the BQ function they are
+%% designed to work with.
+%%
+%% 0 - BQ function returns a value and does not modify state
+%% 1 - BQ function just returns a new state
+%% 2 - BQ function returns a 2-tuple of {Result, NewState}
+%% 3 - BQ function returns a 3-tuple of {Result1, Result2, NewState}
+
+%% Fold over results
+fold0(Fun,  Acc, [{P, BQSN} | Rest]) -> fold0(Fun, Fun(P, BQSN, Acc), Rest);
+fold0(_Fun, Acc, [])                 -> Acc.
+
+%% Do all BQs match?
+all0(Pred, BQSs) -> fold0(fun (_P, _BQSN, false) -> false;
+                              (P,  BQSN,  true)  -> Pred(P, BQSN)
+                          end, true, BQSs).
+
+%% Sum results
+add0(Fun, BQSs) -> fold0(fun (P, BQSN, Acc) -> Acc + Fun(P, BQSN) end, 0, BQSs).
+
+%% Apply for all states
+foreach1(Fun, State = #state{bqss = BQSs}) ->
+    a(State#state{bqss = foreach1(Fun, BQSs, [])}).
+foreach1(Fun, [{P, BQSN} | Rest], BQSAcc) ->
+    BQSN1 = Fun(P, BQSN),
+    foreach1(Fun, Rest, [{P, BQSN1} | BQSAcc]);
+foreach1(_Fun, [], BQSAcc) ->
+    lists:reverse(BQSAcc).
+
+%% For a given thing, just go to its BQ
+pick1(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
+    {P, BQSN} = priority(Prioritisable, BQSs),
+    a(State#state{bqss = bq_store(P, Fun(P, BQSN), BQSs)}).
+
+%% Fold over results
+fold2(Fun, Acc, State = #state{bqss = BQSs}) ->
+    {Res, BQSs1} = fold2(Fun, Acc, BQSs, []),
+    {Res, a(State#state{bqss = BQSs1})}.
+fold2(Fun, Acc, [{P, BQSN} | Rest], BQSAcc) ->
+    {Acc1, BQSN1} = Fun(P, BQSN, Acc),
+    fold2(Fun, Acc1, Rest, [{P, BQSN1} | BQSAcc]);
+fold2(_Fun, Acc, [], BQSAcc) ->
+    {Acc, lists:reverse(BQSAcc)}.
+
+%% Fold over results assuming results are lists and we want to append them
+fold_append2(Fun, State) ->
+    fold2(fun (P, BQSN, Acc) ->
+                  {Res, BQSN1} = Fun(P, BQSN),
+                  {Res ++ Acc, BQSN1}
+          end, [], State).
+
+%% Fold over results assuming results are numbers and we want to sum them
+fold_add2(Fun, State) ->
+    fold2(fun (P, BQSN, Acc) ->
+                  {Res, BQSN1} = Fun(P, BQSN),
+                  {add_maybe_infinity(Res, Acc), BQSN1}
+          end, 0, State).
+
+%% Fold over results assuming results are numbers and we want the minimum
+fold_min2(Fun, State) ->
+    fold2(fun (P, BQSN, Acc) ->
+                  {Res, BQSN1} = Fun(P, BQSN),
+                  {erlang:min(Res, Acc), BQSN1}
+          end, infinity, State).
+
+%% Fold over results assuming results are lists and we want to append
+%% them, and also that we have some AckTags we want to pass in to each
+%% invocation.
+fold_by_acktags2(Fun, AckTags, State) ->
+    AckTagsByPriority = partition_acktags(AckTags),
+    fold_append2(fun (P, BQSN) ->
+                         case orddict:find(P, AckTagsByPriority) of
+                             {ok, AckTagsN} -> Fun(AckTagsN, BQSN);
+                             error          -> {[], BQSN}
+                         end
+                 end, State).
+
+%% For a given thing, just go to its BQ
+pick2(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
+    {P, BQSN} = priority(Prioritisable, BQSs),
+    {Res, BQSN1} = Fun(P, BQSN),
+    {Res, a(State#state{bqss = bq_store(P, BQSN1, BQSs)})}.
+
+%% Run through BQs in priority order until one does not return
+%% {NotFound, NewState} or we have gone through them all.
+find2(Fun, NotFound, State = #state{bqss = BQSs}) ->
+    {Res, BQSs1} = find2(Fun, NotFound, BQSs, []),
+    {Res, a(State#state{bqss = BQSs1})}.
+find2(Fun, NotFound, [{P, BQSN} | Rest], BQSAcc) ->
+    case Fun(P, BQSN) of
+        {NotFound, BQSN1} -> find2(Fun, NotFound, Rest, [{P, BQSN1} | BQSAcc]);
+        {Res, BQSN1}      -> {Res, lists:reverse([{P, BQSN1} | BQSAcc]) ++ Rest}
+    end;
+find2(_Fun, NotFound, [], BQSAcc) ->
+    {NotFound, lists:reverse(BQSAcc)}.
+
+%% Run through BQs in priority order like find2 but also folding as we go.
+findfold3(Fun, Acc, NotFound, State = #state{bqss = BQSs}) ->
+    {Res, Acc1, BQSs1} = findfold3(Fun, Acc, NotFound, BQSs, []),
+    {Res, Acc1, a(State#state{bqss = BQSs1})}.
+findfold3(Fun, Acc, NotFound, [{P, BQSN} | Rest], BQSAcc) ->
+    case Fun(P, BQSN, Acc) of
+        {NotFound, Acc1, BQSN1} ->
+            findfold3(Fun, Acc1, NotFound, Rest, [{P, BQSN1} | BQSAcc]);
+        {Res, Acc1, BQSN1} ->
+            {Res, Acc1, lists:reverse([{P, BQSN1} | BQSAcc]) ++ Rest}
+    end;
+findfold3(_Fun, Acc, NotFound, [], BQSAcc) ->
+    {NotFound, Acc, lists:reverse(BQSAcc)}.
+
+bq_fetch(P, [])               -> exit({not_found, P});
+bq_fetch(P, [{P,  BQSN} | _]) -> BQSN;
+bq_fetch(P, [{_, _BQSN} | T]) -> bq_fetch(P, T).
+
+bq_store(P, BQS, BQSs) ->
+    [{PN, case PN of
+              P -> BQS;
+              _ -> BQSN
+          end} || {PN, BQSN} <- BQSs].
+
+%%
+a(State = #state{bqss = BQSs}) ->
+    Ps = [P || {P, _} <- BQSs],
+    case lists:reverse(lists:usort(Ps)) of
+        Ps -> State;
+        _  -> exit({bad_order, Ps})
+    end.
+
+%%----------------------------------------------------------------------------
+
+priority(P, BQSs) when is_integer(P) ->
+    {P, bq_fetch(P, BQSs)};
+priority(#basic_message{content = Content}, BQSs) ->
+    priority1(rabbit_binary_parser:ensure_content_decoded(Content), BQSs).
+
+priority1(_Content, [{P, BQSN}]) ->
+    {P, BQSN};
+priority1(Content = #content{properties = Props},
+         [{P, BQSN} | Rest]) ->
+    #'P_basic'{priority = Priority0} = Props,
+    Priority = case Priority0 of
+                   undefined                    -> 0;
+                   _ when is_integer(Priority0) -> Priority0
+               end,
+    case Priority >= P of
+        true  -> {P, BQSN};
+        false -> priority1(Content, Rest)
+    end.
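
As a worked example of the bucket walk above, assuming sub-queues for priorities 10, 5 and 0 (values made up): a message published with priority 7 lands in the priority-5 sub-queue, priority 12 in the priority-10 one, and an unset priority is normalised to 0 and falls through to the last sub-queue. A stripped-down sketch, not part of the patch:

    -module(priority_bucket_demo).
    -export([run/0]).

    %% Sub-queue priorities are kept highest-first; a message goes into the
    %% first sub-queue whose priority it meets or exceeds, and anything
    %% below the lowest priority ends up in the last sub-queue.
    bucket(_Priority, [P])                        -> P;
    bucket(Priority,  [P | _]) when Priority >= P -> P;
    bucket(Priority,  [_ | Rest])                 -> bucket(Priority, Rest).

    run() ->
        Buckets = [10, 5, 0],
        10 = bucket(12, Buckets),
        5  = bucket(7,  Buckets),
        0  = bucket(0,  Buckets),   %% 'undefined' is treated as 0 above
        ok.
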
+
+add_maybe_infinity(infinity, _) -> infinity;
+add_maybe_infinity(_, infinity) -> infinity;
+add_maybe_infinity(A, B)        -> A + B.
+
+partition_acktags(AckTags) -> partition_acktags(AckTags, orddict:new()).
+
+partition_acktags([], Partitioned) ->
+    orddict:map(fun (_P, RevAckTags) ->
+                        lists:reverse(RevAckTags)
+                end, Partitioned);
+partition_acktags([{P, AckTag} | Rest], Partitioned) ->
+    partition_acktags(Rest, rabbit_misc:orddict_cons(P, AckTag, Partitioned)).
+
+priority_on_acktags(P, AckTags) ->
+    [case Tag of
+         _ when is_integer(Tag) -> {P, Tag};
+         _                      -> Tag
+     end || Tag <- AckTags].
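
The two helpers above are inverses of a sort: ack tags are tagged with their priority on the way out and regrouped per priority on the way back in. A small sketch (not part of the patch) of that round trip, using orddict directly instead of rabbit_misc:orddict_cons/3; the tag values are made up:

    -module(acktag_partition_demo).
    -export([run/0]).

    %% Group {Priority, AckTag} pairs into an orddict of Priority -> [AckTag],
    %% preserving the original order of the tags within each priority.
    partition(AckTags) ->
        Rev = lists:foldl(
                fun ({P, Tag}, D) ->
                        orddict:update(P, fun (L) -> [Tag | L] end, [Tag], D)
                end, orddict:new(), AckTags),
        orddict:map(fun (_P, L) -> lists:reverse(L) end, Rev).

    run() ->
        [{5, [1, 3]}, {10, [2]}] =
            orddict:to_list(partition([{5, 1}, {10, 2}, {5, 3}])),
        ok.
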
+
+combine_status(P, New, nothing) ->
+    [{priority_lengths, [{P, proplists:get_value(len, New)}]} | New];
+combine_status(P, New, Old) ->
+    Combined = [{K, cse(V, proplists:get_value(K, Old))} || {K, V} <- New],
+    Lens = [{P, proplists:get_value(len, New)} |
+            proplists:get_value(priority_lengths, Old)],
+    [{priority_lengths, Lens} | Combined].
+
+cse(infinity, _)            -> infinity;
+cse(_, infinity)            -> infinity;
+cse(A, B) when is_number(A) -> A + B;
+cse({delta, _, _, _}, _)    -> {delta, todo, todo, todo};
+cse(A, B)                   -> exit({A, B}).
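
To illustrate how status proplists from the individual sub-queues are merged above: numeric values are summed, 'infinity' is sticky, and a priority_lengths entry collects the per-priority 'len' values. A self-contained sketch, not part of the patch, with made-up keys and numbers:

    -module(combine_status_demo).
    -export([run/0]).

    %% Mirrors combine_status/3 and cse/2 above, minus the 'delta' case.
    combine(P, New, nothing) ->
        [{priority_lengths, [{P, proplists:get_value(len, New)}]} | New];
    combine(P, New, Old) ->
        Merged = [{K, sum(V, proplists:get_value(K, Old))} || {K, V} <- New],
        Lens   = [{P, proplists:get_value(len, New)} |
                  proplists:get_value(priority_lengths, Old)],
        [{priority_lengths, Lens} | Merged].

    sum(infinity, _) -> infinity;
    sum(_, infinity) -> infinity;
    sum(A, B)        -> A + B.

    run() ->
        S10 = combine(10, [{len, 3}, {ram_bytes, 100}], nothing),
        S5  = combine(5,  [{len, 1}, {ram_bytes, infinity}], S10),
        4        = proplists:get_value(len, S5),
        infinity = proplists:get_value(ram_bytes, S5),
        [{5, 1}, {10, 3}] = proplists:get_value(priority_lengths, S5),
        ok.
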
index 70a4da1e47cbb8e2fb605abf03efd65da98a071e..734228be34977d1b766c4dbda30102c0e7a57a54 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_queue_collector).
index 4b1f07de0d204af05c7bb94dc64205985c7ada50..ae8481aaf8a4fbc1dfaac59f77ac20a3d537d1d5 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_queue_consumers).
@@ -125,7 +125,8 @@ unacknowledged_message_count() ->
     lists:sum([queue:len(C#cr.acktags) || C <- all_ch_record()]).
 
 add(ChPid, CTag, NoAck, LimiterPid, LimiterActive, Prefetch, Args, IsEmpty,
-    State = #state{consumers = Consumers}) ->
+    State = #state{consumers = Consumers,
+                   use       = CUInfo}) ->
     C = #cr{consumer_count = Count,
             limiter        = Limiter} = ch_record(ChPid, LimiterPid),
     Limiter1 = case LimiterActive of
@@ -144,7 +145,8 @@ add(ChPid, CTag, NoAck, LimiterPid, LimiterActive, Prefetch, Args, IsEmpty,
                          ack_required = not NoAck,
                          prefetch     = Prefetch,
                          args         = Args},
-    State#state{consumers = add_consumer({ChPid, Consumer}, Consumers)}.
+    State#state{consumers = add_consumer({ChPid, Consumer}, Consumers),
+                use       = update_use(CUInfo, active)}.
 
 remove(ChPid, CTag, State = #state{consumers = Consumers}) ->
     case lookup_ch(ChPid) of
@@ -173,10 +175,11 @@ erase_ch(ChPid, State = #state{consumers = Consumers}) ->
         C = #cr{ch_pid            = ChPid,
                 acktags           = ChAckTags,
                 blocked_consumers = BlockedQ} ->
-            AllConsumers = priority_queue:join(Consumers, BlockedQ),
+            All = priority_queue:join(Consumers, BlockedQ),
             ok = erase_ch_record(C),
+            Filtered = priority_queue:filter(chan_pred(ChPid, true), All),
             {[AckTag || {AckTag, _CTag} <- queue:to_list(ChAckTags)],
-             tags(priority_queue:to_list(AllConsumers)),
+             tags(priority_queue:to_list(Filtered)),
              State#state{consumers = remove_consumers(ChPid, Consumers)}}
     end.
 
@@ -440,9 +443,12 @@ remove_consumer(ChPid, CTag, Queue) ->
                           end, Queue).
 
 remove_consumers(ChPid, Queue) ->
-    priority_queue:filter(fun ({CP, _Consumer}) when CP =:= ChPid -> false;
-                              (_)                                 -> true
-                          end, Queue).
+    priority_queue:filter(chan_pred(ChPid, false), Queue).
+
+chan_pred(ChPid, Want) ->
+    fun ({CP, _Consumer}) when CP =:= ChPid -> Want;
+        (_)                                 -> not Want
+    end.
 
 update_use({inactive, _, _, _}   = CUInfo, inactive) ->
     CUInfo;
index 6205e2dc1859f0b628cd9bfbe8d0c628511427f6..adfe0c7faec0d6e79aa811ee295bd25baeb9b476 100644 (file)
@@ -2,7 +2,7 @@
 
 -include("rabbit.hrl").
 
--export([select/1, set/1]).
+-export([select/1, set/1, register/2, unregister/1]).
 
 %%----------------------------------------------------------------------------
 
@@ -41,3 +41,24 @@ select(Modules) ->
 set(Q) -> Q#amqqueue{decorators = [D || D <- list(), D:active_for(Q)]}.
 
 list() -> [M || {_, M} <- rabbit_registry:lookup_all(queue_decorator)].
+
+register(TypeName, ModuleName) ->
+    rabbit_registry:register(queue_decorator, TypeName, ModuleName),
+    [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
+    ok.
+
+unregister(TypeName) ->
+    rabbit_registry:unregister(queue_decorator, TypeName),
+    [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
+    ok.
+
+maybe_recover(Q = #amqqueue{name       = Name,
+                            decorators = Decs}) ->
+    #amqqueue{decorators = Decs1} = set(Q),
+    Old = lists:sort(select(Decs)),
+    New = lists:sort(select(Decs1)),
+    case New of
+        Old -> ok;
+        _   -> [M:startup(Q) || M <- New -- Old],
+               rabbit_amqqueue:update_decorators(Name)
+    end.
index 56c19d3f51a2be8f68babd30c1ff076751eb87a8..0c7d7c230a884aa44364d0b41689efc86c28ca5c 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_queue_index).
 
--export([init/2, recover/5,
+-export([erase/1, init/3, recover/6,
          terminate/2, delete_and_terminate/1,
-         publish/5, deliver/2, ack/2, sync/1, needs_sync/1, flush/1,
+         publish/6, deliver/2, ack/2, sync/1, needs_sync/1, flush/1,
          read/3, next_segment_boundary/1, bounds/1, start/1, stop/0]).
 
--export([add_queue_ttl/0, avoid_zeroes/0]).
+-export([add_queue_ttl/0, avoid_zeroes/0, store_msg_size/0, store_msg/0]).
 
 -define(CLEAN_FILENAME, "clean.dot").
 
 %%----------------------------------------------------------------------------
 
 %% The queue index is responsible for recording the order of messages
-%% within a queue on disk.
+%% within a queue on disk. As such it contains records of messages
+%% being published, delivered and acknowledged. The publish record
+%% includes the sequence ID, message ID and a small quantity of
+%% metadata about the message; the delivery and acknowledgement
+%% records just contain the sequence ID. A publish record may also
+%% contain the complete message if provided to publish/6; this allows
+%% the message store to be avoided altogether for small messages. In
+%% either case the publish record is stored in memory in the same
+%% serialised format it will take on disk.
 %%
 %% Because of the fact that the queue can decide at any point to send
 %% a queue entry to disk, you can not rely on publishes appearing in
@@ -36,7 +44,7 @@
 %% then delivered, then ack'd.
 %%
 %% In order to be able to clean up ack'd messages, we write to segment
-%% files. These files have a fixed maximum size: ?SEGMENT_ENTRY_COUNT
+%% files. These files have a fixed number of entries: ?SEGMENT_ENTRY_COUNT
 %% publishes, delivers and acknowledgements. They are numbered, and so
 %% it is known that the 0th segment contains messages 0 ->
 %% ?SEGMENT_ENTRY_COUNT - 1, the 1st segment contains messages
@@ -85,7 +93,7 @@
 %% and seeding the message store on start up.
 %%
 %% Note that in general, the representation of a message's state as
-%% the tuple: {('no_pub'|{MsgId, MsgProps, IsPersistent}),
+%% the tuple: {('no_pub'|{IsPersistent, Bin, MsgBin}),
 %% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly
 %% necessary for most operations. However, for startup, and to ensure
 %% the safe and correct combination of journal entries with entries
 -define(REL_SEQ_ONLY_RECORD_BYTES, 2).
 
 %% publish record is binary 1 followed by a bit for is_persistent,
-%% then 14 bits of rel seq id, 64 bits for message expiry and 128 bits
-%% of md5sum msg id
+%% then 14 bits of rel seq id, 128 bits of md5sum msg id, 64 bits for
+%% message expiry and 32 bits of size.
 -define(PUB_PREFIX, 1).
 -define(PUB_PREFIX_BITS, 1).
 
 -define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes
 -define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)).
 
+%% This is the size of the message body content, for stats
+-define(SIZE_BYTES, 4).
+-define(SIZE_BITS, (?SIZE_BYTES * 8)).
+
+%% This is the size of the message record embedded in the queue
+%% index. If 0, the message can be found in the message store.
+-define(EMBEDDED_SIZE_BYTES, 4).
+-define(EMBEDDED_SIZE_BITS, (?EMBEDDED_SIZE_BYTES * 8)).
+
 %% 16 bytes for md5sum + 8 for expiry
--define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)).
-%% + 2 for seq, bits and prefix
--define(PUB_RECORD_BYTES, (?PUB_RECORD_BODY_BYTES + 2)).
+-define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES + ?SIZE_BYTES)).
+%% + 4 for size
+-define(PUB_RECORD_SIZE_BYTES, (?PUB_RECORD_BODY_BYTES + ?EMBEDDED_SIZE_BYTES)).
 
-%% 1 publish, 1 deliver, 1 ack per msg
--define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT *
-            (?PUB_RECORD_BYTES + (2 * ?REL_SEQ_ONLY_RECORD_BYTES))).
+%% + 2 for seq, bits and prefix
+-define(PUB_RECORD_PREFIX_BYTES, 2).
 
 %% ---- misc ----
 
--define(PUB, {_, _, _}). %% {MsgId, MsgProps, IsPersistent}
+-define(PUB, {_, _, _}). %% {IsPersistent, Bin, MsgBin}
 
 -define(READ_MODE, [binary, raw, read]).
--define(READ_AHEAD_MODE, [{read_ahead, ?SEGMENT_TOTAL_SIZE} | ?READ_MODE]).
 -define(WRITE_MODE, [write | ?READ_MODE]).
 
 %%----------------------------------------------------------------------------
 
--record(qistate, { dir, segments, journal_handle, dirty_count,
-                   max_journal_entries, on_sync, unconfirmed }).
+-record(qistate, {dir, segments, journal_handle, dirty_count,
+                  max_journal_entries, on_sync, on_sync_msg,
+                  unconfirmed, unconfirmed_msg}).
 
--record(segment, { num, path, journal_entries, unacked }).
+-record(segment, {num, path, journal_entries, unacked}).
 
 -include("rabbit.hrl").
 
 %%----------------------------------------------------------------------------
 
--rabbit_upgrade({add_queue_ttl, local, []}).
--rabbit_upgrade({avoid_zeroes,  local, [add_queue_ttl]}).
+-rabbit_upgrade({add_queue_ttl,  local, []}).
+-rabbit_upgrade({avoid_zeroes,   local, [add_queue_ttl]}).
+-rabbit_upgrade({store_msg_size, local, [avoid_zeroes]}).
+-rabbit_upgrade({store_msg,      local, [store_msg_size]}).
 
 -ifdef(use_specs).
 
 -type(segment() :: ('undefined' |
                     #segment { num             :: non_neg_integer(),
                                path            :: file:filename(),
-                               journal_entries :: array(),
+                               journal_entries :: array:array(),
                                unacked         :: non_neg_integer()
                              })).
 -type(seq_id() :: integer()).
--type(seg_dict() :: {dict(), [segment()]}).
--type(on_sync_fun() :: fun ((gb_set()) -> ok)).
+-type(seg_dict() :: {dict:dict(), [segment()]}).
+-type(on_sync_fun() :: fun ((gb_sets:set()) -> ok)).
 -type(qistate() :: #qistate { dir                 :: file:filename(),
                               segments            :: 'undefined' | seg_dict(),
                               journal_handle      :: hdl(),
                               dirty_count         :: integer(),
                               max_journal_entries :: non_neg_integer(),
                               on_sync             :: on_sync_fun(),
-                              unconfirmed         :: gb_set()
+                              on_sync_msg         :: on_sync_fun(),
+                              unconfirmed         :: gb_sets:set(),
+                              unconfirmed_msg     :: gb_sets:set()
                             }).
 -type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())).
 -type(walker(A) :: fun ((A) -> 'finished' |
                                {rabbit_types:msg_id(), non_neg_integer(), A})).
 -type(shutdown_terms() :: [term()] | 'non_clean_shutdown').
 
--spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()).
--spec(recover/5 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(),
-                    contains_predicate(), on_sync_fun()) ->
-                        {'undefined' | non_neg_integer(), qistate()}).
+-spec(erase/1 :: (rabbit_amqqueue:name()) -> 'ok').
+-spec(init/3 :: (rabbit_amqqueue:name(),
+                 on_sync_fun(), on_sync_fun()) -> qistate()).
+-spec(recover/6 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(),
+                    contains_predicate(),
+                    on_sync_fun(), on_sync_fun()) ->
+                        {'undefined' | non_neg_integer(),
+                         'undefined' | non_neg_integer(), qistate()}).
 -spec(terminate/2 :: ([any()], qistate()) -> qistate()).
 -spec(delete_and_terminate/1 :: (qistate()) -> qistate()).
--spec(publish/5 :: (rabbit_types:msg_id(), seq_id(),
-                    rabbit_types:message_properties(), boolean(), qistate())
-                   -> qistate()).
+-spec(publish/6 :: (rabbit_types:msg_id(), seq_id(),
+                    rabbit_types:message_properties(), boolean(),
+                    non_neg_integer(), qistate()) -> qistate()).
 -spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()).
 -spec(ack/2 :: ([seq_id()], qistate()) -> qistate()).
 -spec(sync/1 :: (qistate()) -> qistate()).
 %% public API
 %%----------------------------------------------------------------------------
 
-init(Name, OnSyncFun) ->
+erase(Name) ->
+    #qistate { dir = Dir } = blank_state(Name),
+    case rabbit_file:is_dir(Dir) of
+        true  -> rabbit_file:recursive_delete([Dir]);
+        false -> ok
+    end.
+
+init(Name, OnSyncFun, OnSyncMsgFun) ->
     State = #qistate { dir = Dir } = blank_state(Name),
     false = rabbit_file:is_file(Dir), %% is_file == is file or dir
-    State #qistate { on_sync = OnSyncFun }.
+    State#qistate{on_sync     = OnSyncFun,
+                  on_sync_msg = OnSyncMsgFun}.
 
-recover(Name, Terms, MsgStoreRecovered, ContainsCheckFun, OnSyncFun) ->
+recover(Name, Terms, MsgStoreRecovered, ContainsCheckFun,
+        OnSyncFun, OnSyncMsgFun) ->
     State = blank_state(Name),
-    State1 = State #qistate { on_sync = OnSyncFun },
+    State1 = State #qistate{on_sync     = OnSyncFun,
+                            on_sync_msg = OnSyncMsgFun},
     CleanShutdown = Terms /= non_clean_shutdown,
     case CleanShutdown andalso MsgStoreRecovered of
         true  -> RecoveredCounts = proplists:get_value(segments, Terms, []),
@@ -254,26 +288,35 @@ delete_and_terminate(State) ->
     ok = rabbit_file:recursive_delete([Dir]),
     State1.
 
-publish(MsgId, SeqId, MsgProps, IsPersistent,
-        State = #qistate { unconfirmed = Unconfirmed })
-  when is_binary(MsgId) ->
+publish(MsgOrId, SeqId, MsgProps, IsPersistent, JournalSizeHint,
+        State = #qistate{unconfirmed     = UC,
+                         unconfirmed_msg = UCM}) ->
+    MsgId = case MsgOrId of
+                #basic_message{id = Id} -> Id;
+                Id when is_binary(Id)   -> Id
+            end,
     ?MSG_ID_BYTES = size(MsgId),
     {JournalHdl, State1} =
         get_journal_handle(
-          case MsgProps#message_properties.needs_confirming of
-              true  -> Unconfirmed1 = gb_sets:add_element(MsgId, Unconfirmed),
-                       State #qistate { unconfirmed = Unconfirmed1 };
-              false -> State
+          case {MsgProps#message_properties.needs_confirming, MsgOrId} of
+              {true,  MsgId} -> UC1  = gb_sets:add_element(MsgId, UC),
+                                State#qistate{unconfirmed     = UC1};
+              {true,  _}     -> UCM1 = gb_sets:add_element(MsgId, UCM),
+                                State#qistate{unconfirmed_msg = UCM1};
+              {false, _}     -> State
           end),
+    file_handle_cache_stats:update(queue_index_journal_write),
+    {Bin, MsgBin} = create_pub_record_body(MsgOrId, MsgProps),
     ok = file_handle_cache:append(
            JournalHdl, [<<(case IsPersistent of
                                true  -> ?PUB_PERSIST_JPREFIX;
                                false -> ?PUB_TRANS_JPREFIX
                            end):?JPREFIX_BITS,
-                          SeqId:?SEQ_BITS>>,
-                        create_pub_record_body(MsgId, MsgProps)]),
+                          SeqId:?SEQ_BITS, Bin/binary,
+                          (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin]),
     maybe_flush_journal(
-      add_to_journal(SeqId, {MsgId, MsgProps, IsPersistent}, State1)).
+      JournalSizeHint,
+      add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State1)).
 
 deliver(SeqIds, State) ->
     deliver_or_ack(del, SeqIds, State).
@@ -289,10 +332,12 @@ sync(State = #qistate { journal_handle = JournalHdl }) ->
     ok = file_handle_cache:sync(JournalHdl),
     notify_sync(State).
 
-needs_sync(#qistate { journal_handle = undefined }) ->
+needs_sync(#qistate{journal_handle = undefined}) ->
     false;
-needs_sync(#qistate { journal_handle = JournalHdl, unconfirmed = UC }) ->
-    case gb_sets:is_empty(UC) of
+needs_sync(#qistate{journal_handle  = JournalHdl,
+                    unconfirmed     = UC,
+                    unconfirmed_msg = UCM}) ->
+    case gb_sets:is_empty(UC) andalso gb_sets:is_empty(UCM) of
         true  -> case file_handle_cache:needs_sync(JournalHdl) of
                      true  -> other;
                      false -> false
@@ -396,7 +441,9 @@ blank_state_dir(Dir) ->
                dirty_count         = 0,
                max_journal_entries = MaxJournal,
                on_sync             = fun (_) -> ok end,
-               unconfirmed         = gb_sets:new() }.
+               on_sync_msg         = fun (_) -> ok end,
+               unconfirmed         = gb_sets:new(),
+               unconfirmed_msg     = gb_sets:new() }.
 
 init_clean(RecoveredCounts, State) ->
     %% Load the journal. Since this is a clean recovery this (almost)
@@ -415,7 +462,7 @@ init_clean(RecoveredCounts, State) ->
           end, Segments, RecoveredCounts),
     %% the counts above include transient messages, which would be the
     %% wrong thing to return
-    {undefined, State1 # qistate { segments = Segments1 }}.
+    {undefined, undefined, State1 # qistate { segments = Segments1 }}.
 
 init_dirty(CleanShutdown, ContainsCheckFun, State) ->
     %% Recover the journal completely. This will also load segments
@@ -424,7 +471,7 @@ init_dirty(CleanShutdown, ContainsCheckFun, State) ->
     %% and the journal.
     State1 = #qistate { dir = Dir, segments = Segments } =
         recover_journal(State),
-    {Segments1, Count, DirtyCount} =
+    {Segments1, Count, Bytes, DirtyCount} =
         %% Load each segment in turn and filter out messages that are
         %% not in the msg_store, by adding acks to the journal. These
         %% acks only go to the RAM journal as it doesn't matter if we
@@ -433,16 +480,18 @@ init_dirty(CleanShutdown, ContainsCheckFun, State) ->
         %% dirty count here, so we can call maybe_flush_journal below
         %% and avoid unnecessary file system operations.
         lists:foldl(
-          fun (Seg, {Segments2, CountAcc, DirtyCount}) ->
-                  {Segment = #segment { unacked = UnackedCount }, Dirty} =
+          fun (Seg, {Segments2, CountAcc, BytesAcc, DirtyCount}) ->
+                  {{Segment = #segment { unacked = UnackedCount }, Dirty},
+                   UnackedBytes} =
                       recover_segment(ContainsCheckFun, CleanShutdown,
                                       segment_find_or_new(Seg, Dir, Segments2)),
                   {segment_store(Segment, Segments2),
-                   CountAcc + UnackedCount, DirtyCount + Dirty}
-          end, {Segments, 0, 0}, all_segment_nums(State1)),
+                   CountAcc + UnackedCount,
+                   BytesAcc + UnackedBytes, DirtyCount + Dirty}
+          end, {Segments, 0, 0, 0}, all_segment_nums(State1)),
     State2 = maybe_flush_journal(State1 #qistate { segments = Segments1,
                                                    dirty_count = DirtyCount }),
-    {Count, State2}.
+    {Count, Bytes, State2}.
 
 terminate(State = #qistate { journal_handle = JournalHdl,
                              segments = Segments }) ->
@@ -464,12 +513,17 @@ recover_segment(ContainsCheckFun, CleanShutdown,
     {SegEntries1, UnackedCountDelta} =
         segment_plus_journal(SegEntries, JEntries),
     array:sparse_foldl(
-      fun (RelSeq, {{MsgId, _MsgProps, _IsPersistent}, Del, no_ack},
-           SegmentAndDirtyCount) ->
-              recover_message(ContainsCheckFun(MsgId), CleanShutdown,
-                              Del, RelSeq, SegmentAndDirtyCount)
+      fun (RelSeq, {{IsPersistent, Bin, MsgBin}, Del, no_ack},
+           {SegmentAndDirtyCount, Bytes}) ->
+              {MsgOrId, MsgProps} = parse_pub_record_body(Bin, MsgBin),
+              {recover_message(ContainsCheckFun(MsgOrId), CleanShutdown,
+                               Del, RelSeq, SegmentAndDirtyCount),
+               Bytes + case IsPersistent of
+                           true  -> MsgProps#message_properties.size;
+                           false -> 0
+                       end}
       end,
-      {Segment #segment { unacked = UnackedCount + UnackedCountDelta }, 0},
+      {{Segment #segment { unacked = UnackedCount + UnackedCountDelta }, 0}, 0},
       SegEntries1).
 
 recover_message( true,  true,   _Del, _RelSeq, SegmentAndDirtyCount) ->
@@ -522,7 +576,8 @@ queue_index_walker({next, Gatherer}) when is_pid(Gatherer) ->
 queue_index_walker_reader(QueueName, Gatherer) ->
     State = blank_state(QueueName),
     ok = scan_segments(
-           fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, ok) ->
+           fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, ok)
+                 when is_binary(MsgId) ->
                    gatherer:sync_in(Gatherer, {MsgId, 1});
                (_SeqId, _MsgId, _MsgProps, _IsPersistent, _IsDelivered,
                 _IsAcked, Acc) ->
@@ -536,9 +591,9 @@ scan_segments(Fun, Acc, State) ->
     Result = lists:foldr(
       fun (Seg, AccN) ->
               segment_entries_foldr(
-                fun (RelSeq, {{MsgId, MsgProps, IsPersistent},
+                fun (RelSeq, {{MsgOrId, MsgProps, IsPersistent},
                               IsDelivered, IsAcked}, AccM) ->
-                        Fun(reconstruct_seq_id(Seg, RelSeq), MsgId, MsgProps,
+                        Fun(reconstruct_seq_id(Seg, RelSeq), MsgOrId, MsgProps,
                             IsPersistent, IsDelivered, IsAcked, AccM)
                 end, AccN, segment_find_or_new(Seg, Dir, Segments))
       end, Acc, all_segment_nums(State1)),
@@ -549,21 +604,35 @@ scan_segments(Fun, Acc, State) ->
 %% expiry/binary manipulation
 %%----------------------------------------------------------------------------
 
-create_pub_record_body(MsgId, #message_properties { expiry = Expiry }) ->
-    [MsgId, expiry_to_binary(Expiry)].
+create_pub_record_body(MsgOrId, #message_properties { expiry = Expiry,
+                                                      size   = Size }) ->
+    ExpiryBin = expiry_to_binary(Expiry),
+    case MsgOrId of
+        MsgId when is_binary(MsgId) ->
+            {<<MsgId/binary, ExpiryBin/binary, Size:?SIZE_BITS>>, <<>>};
+        #basic_message{id = MsgId} ->
+            MsgBin = term_to_binary(MsgOrId),
+            {<<MsgId/binary, ExpiryBin/binary, Size:?SIZE_BITS>>, MsgBin}
+    end.
 
 expiry_to_binary(undefined) -> <<?NO_EXPIRY:?EXPIRY_BITS>>;
 expiry_to_binary(Expiry)    -> <<Expiry:?EXPIRY_BITS>>.
 
-parse_pub_record_body(<<MsgIdNum:?MSG_ID_BITS, Expiry:?EXPIRY_BITS>>) ->
+parse_pub_record_body(<<MsgIdNum:?MSG_ID_BITS, Expiry:?EXPIRY_BITS,
+                        Size:?SIZE_BITS>>, MsgBin) ->
     %% work around for binary data fragmentation. See
     %% rabbit_msg_file:read_next/2
     <<MsgId:?MSG_ID_BYTES/binary>> = <<MsgIdNum:?MSG_ID_BITS>>,
-    Exp = case Expiry of
-              ?NO_EXPIRY -> undefined;
-              X          -> X
-          end,
-    {MsgId, #message_properties { expiry = Exp }}.
+    Props = #message_properties{expiry = case Expiry of
+                                             ?NO_EXPIRY -> undefined;
+                                             X          -> X
+                                         end,
+                                size   = Size},
+    case MsgBin of
+        <<>> -> {MsgId, Props};
+        _    -> Msg = #basic_message{id = MsgId} = binary_to_term(MsgBin),
+                {Msg, Props}
+    end.
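
For reference, the fixed-width body written and parsed above is 28 bytes: a 128-bit msg id, 64 bits of expiry and 32 bits of size, optionally followed elsewhere by an embedded message blob. A minimal round-trip sketch (not part of the patch), which assumes ?NO_EXPIRY is encoded as 0 purely for illustration:

    -module(pub_record_body_demo).
    -export([run/0]).

    -define(EXPIRY_BITS, 64).
    -define(SIZE_BITS,   32).
    -define(NO_EXPIRY,   0).

    encode(MsgId, Expiry, Size) when is_binary(MsgId), byte_size(MsgId) =:= 16 ->
        ExpiryBin = case Expiry of
                        undefined -> <<?NO_EXPIRY:?EXPIRY_BITS>>;
                        _         -> <<Expiry:?EXPIRY_BITS>>
                    end,
        <<MsgId/binary, ExpiryBin/binary, Size:?SIZE_BITS>>.

    parse(<<MsgId:16/binary, Expiry:?EXPIRY_BITS, Size:?SIZE_BITS>>) ->
        {MsgId, case Expiry of ?NO_EXPIRY -> undefined; X -> X end, Size}.

    run() ->
        MsgId = erlang:md5(<<"payload">>),   %% 16 bytes, like a real msg id
        Body  = encode(MsgId, undefined, 7),
        28    = byte_size(Body),             %% 16 + 8 + 4 bytes
        {MsgId, undefined, 7} = parse(Body),
        ok.
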
 
 %%----------------------------------------------------------------------------
 %% journal manipulation
@@ -606,11 +675,14 @@ add_to_journal(RelSeq, Action, JEntries) ->
             array:reset(RelSeq, JEntries)
     end.
 
-maybe_flush_journal(State = #qistate { dirty_count = DCount,
-                                       max_journal_entries = MaxJournal })
-  when DCount > MaxJournal ->
-    flush_journal(State);
 maybe_flush_journal(State) ->
+    maybe_flush_journal(infinity, State).
+
+maybe_flush_journal(Hint, State = #qistate { dirty_count = DCount,
+                                             max_journal_entries = MaxJournal })
+  when DCount > MaxJournal orelse (Hint =/= infinity andalso DCount > Hint) ->
+    flush_journal(State);
+maybe_flush_journal(_Hint, State) ->
     State.
 
 flush_journal(State = #qistate { segments = Segments }) ->
@@ -634,9 +706,13 @@ append_journal_to_segment(#segment { journal_entries = JEntries,
                                      path = Path } = Segment) ->
     case array:sparse_size(JEntries) of
         0 -> Segment;
-        _ -> {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
+        _ -> Seg = array:sparse_foldr(
+                     fun entry_to_segment/3, [], JEntries),
+             file_handle_cache_stats:update(queue_index_write),
+
+             {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
                                                 [{write_buffer, infinity}]),
-             array:sparse_foldl(fun write_entry_to_segment/3, Hdl, JEntries),
+             file_handle_cache:append(Hdl, Seg),
              ok = file_handle_cache:close(Hdl),
              Segment #segment { journal_entries = array_new() }
     end.
@@ -655,10 +731,13 @@ get_journal_handle(State = #qistate { journal_handle = Hdl }) ->
 %% if you call it more than once on the same state. Assumes the counts
 %% are 0 to start with.
 load_journal(State = #qistate { dir = Dir }) ->
-    case rabbit_file:is_file(filename:join(Dir, ?JOURNAL_FILENAME)) of
+    Path = filename:join(Dir, ?JOURNAL_FILENAME),
+    case rabbit_file:is_file(Path) of
         true  -> {JournalHdl, State1} = get_journal_handle(State),
+                 Size = rabbit_file:file_size(Path),
                  {ok, 0} = file_handle_cache:position(JournalHdl, 0),
-                 load_journal_entries(State1);
+                 {ok, JournalBin} = file_handle_cache:read(JournalHdl, Size),
+                 parse_journal_entries(JournalBin, State1);
         false -> State
     end.
 
@@ -682,41 +761,37 @@ recover_journal(State) ->
           end, Segments),
     State1 #qistate { segments = Segments1 }.
 
-load_journal_entries(State = #qistate { journal_handle = Hdl }) ->
-    case file_handle_cache:read(Hdl, ?SEQ_BYTES) of
-        {ok, <<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>} ->
-            case Prefix of
-                ?DEL_JPREFIX ->
-                    load_journal_entries(add_to_journal(SeqId, del, State));
-                ?ACK_JPREFIX ->
-                    load_journal_entries(add_to_journal(SeqId, ack, State));
-                _ ->
-                    case file_handle_cache:read(Hdl, ?PUB_RECORD_BODY_BYTES) of
-                        %% Journal entry composed only of zeroes was probably
-                        %% produced during a dirty shutdown so stop reading
-                        {ok, <<0:?PUB_RECORD_BODY_BYTES/unit:8>>} ->
-                            State;
-                        {ok, <<Bin:?PUB_RECORD_BODY_BYTES/binary>>} ->
-                            {MsgId, MsgProps} = parse_pub_record_body(Bin),
-                            IsPersistent = case Prefix of
-                                               ?PUB_PERSIST_JPREFIX -> true;
-                                               ?PUB_TRANS_JPREFIX   -> false
-                                           end,
-                            load_journal_entries(
-                              add_to_journal(
-                                SeqId, {MsgId, MsgProps, IsPersistent}, State));
-                        _ErrOrEoF -> %% err, we've lost at least a publish
-                            State
-                    end
-            end;
-        _ErrOrEoF -> State
-    end.
+parse_journal_entries(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Rest/binary>>, State) ->
+    parse_journal_entries(Rest, add_to_journal(SeqId, del, State));
+
+parse_journal_entries(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Rest/binary>>, State) ->
+    parse_journal_entries(Rest, add_to_journal(SeqId, ack, State));
+parse_journal_entries(<<0:?JPREFIX_BITS, 0:?SEQ_BITS,
+                        0:?PUB_RECORD_SIZE_BYTES/unit:8, _/binary>>, State) ->
+    %% Journal entry composed only of zeroes was probably
+    %% produced during a dirty shutdown so stop reading
+    State;
+parse_journal_entries(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Bin:?PUB_RECORD_BODY_BYTES/binary,
+                        MsgSize:?EMBEDDED_SIZE_BITS, MsgBin:MsgSize/binary,
+                        Rest/binary>>, State) ->
+    IsPersistent = case Prefix of
+                       ?PUB_PERSIST_JPREFIX -> true;
+                       ?PUB_TRANS_JPREFIX   -> false
+                   end,
+    parse_journal_entries(
+      Rest, add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State));
+parse_journal_entries(_ErrOrEoF, State) ->
+    State.
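
The journal is now read in one gulp and decoded with plain binary pattern matching rather than repeated file_handle_cache reads. A small sketch (not part of the patch) of the same parsing style on a toy record format; the prefixes and field widths below are made up and do not match the real journal layout:

    -module(journal_parse_demo).
    -export([run/0]).

    %% Recursively consume prefix-tagged records from a flat binary,
    %% stopping on an all-zero record (dirty-shutdown padding) or on a
    %% truncated tail, just as parse_journal_entries/2 does.
    parse(<<1:8, SeqId:32, Rest/binary>>, Acc) -> parse(Rest, [{del, SeqId} | Acc]);
    parse(<<2:8, SeqId:32, Rest/binary>>, Acc) -> parse(Rest, [{ack, SeqId} | Acc]);
    parse(<<0:8, 0:32, _/binary>>, Acc)        -> lists:reverse(Acc);
    parse(_ErrOrEoF, Acc)                      -> lists:reverse(Acc).

    run() ->
        Journal = <<1:8, 7:32, 2:8, 7:32, 0:8, 0:32>>,
        [{del, 7}, {ack, 7}] = parse(Journal, []),
        ok.
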
 
 deliver_or_ack(_Kind, [], State) ->
     State;
 deliver_or_ack(Kind, SeqIds, State) ->
     JPrefix = case Kind of ack -> ?ACK_JPREFIX; del -> ?DEL_JPREFIX end,
     {JournalHdl, State1} = get_journal_handle(State),
+    file_handle_cache_stats:update(queue_index_journal_write),
     ok = file_handle_cache:append(
            JournalHdl,
            [<<JPrefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>> || SeqId <- SeqIds]),
@@ -724,11 +799,19 @@ deliver_or_ack(Kind, SeqIds, State) ->
                                             add_to_journal(SeqId, Kind, StateN)
                                     end, State1, SeqIds)).
 
-notify_sync(State = #qistate { unconfirmed = UC, on_sync = OnSyncFun }) ->
-    case gb_sets:is_empty(UC) of
-        true  -> State;
-        false -> OnSyncFun(UC),
-                 State #qistate { unconfirmed = gb_sets:new() }
+notify_sync(State = #qistate{unconfirmed     = UC,
+                             unconfirmed_msg = UCM,
+                             on_sync         = OnSyncFun,
+                             on_sync_msg     = OnSyncMsgFun}) ->
+    State1 = case gb_sets:is_empty(UC) of
+                 true  -> State;
+                 false -> OnSyncFun(UC),
+                          State#qistate{unconfirmed = gb_sets:new()}
+             end,
+    case gb_sets:is_empty(UCM) of
+        true  -> State1;
+        false -> OnSyncMsgFun(UCM),
+                 State1#qistate{unconfirmed_msg = gb_sets:new()}
     end.
 
 %%----------------------------------------------------------------------------
@@ -801,42 +884,42 @@ segment_nums({Segments, CachedSegments}) ->
 segments_new() ->
     {dict:new(), []}.
 
-write_entry_to_segment(_RelSeq, {?PUB, del, ack}, Hdl) ->
-    Hdl;
-write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) ->
-    ok = case Pub of
-             no_pub ->
-                 ok;
-             {MsgId, MsgProps, IsPersistent} ->
-                 file_handle_cache:append(
-                   Hdl, [<<?PUB_PREFIX:?PUB_PREFIX_BITS,
-                           (bool_to_int(IsPersistent)):1,
-                           RelSeq:?REL_SEQ_BITS>>,
-                         create_pub_record_body(MsgId, MsgProps)])
-         end,
-    ok = case {Del, Ack} of
-             {no_del, no_ack} ->
-                 ok;
-             _ ->
-                 Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
-                            RelSeq:?REL_SEQ_BITS>>,
-                 file_handle_cache:append(
-                   Hdl, case {Del, Ack} of
-                            {del, ack} -> [Binary, Binary];
-                            _          -> Binary
-                        end)
-         end,
-    Hdl.
+entry_to_segment(_RelSeq, {?PUB, del, ack}, Buf) ->
+    Buf;
+entry_to_segment(RelSeq, {Pub, Del, Ack}, Buf) ->
+    %% NB: we are assembling the segment in reverse order here, so
+    %% del/ack comes first.
+    Buf1 = case {Del, Ack} of
+               {no_del, no_ack} ->
+                   Buf;
+               _ ->
+                   Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+                              RelSeq:?REL_SEQ_BITS>>,
+                   case {Del, Ack} of
+                       {del, ack} -> [[Binary, Binary] | Buf];
+                       _          -> [Binary | Buf]
+                   end
+           end,
+    case Pub of
+        no_pub ->
+            Buf1;
+        {IsPersistent, Bin, MsgBin} ->
+            [[<<?PUB_PREFIX:?PUB_PREFIX_BITS,
+                (bool_to_int(IsPersistent)):1,
+                RelSeq:?REL_SEQ_BITS, Bin/binary,
+                (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin] | Buf1]
+    end.
 
 read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq},
                      {Messages, Segments}, Dir) ->
     Segment = segment_find_or_new(Seg, Dir, Segments),
     {segment_entries_foldr(
-       fun (RelSeq, {{MsgId, MsgProps, IsPersistent}, IsDelivered, no_ack}, Acc)
+       fun (RelSeq, {{MsgOrId, MsgProps, IsPersistent}, IsDelivered, no_ack},
+            Acc)
              when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso
                   (Seg < EndSeg   orelse EndRelSeq   >= RelSeq) ->
-               [ {MsgId, reconstruct_seq_id(StartSeg, RelSeq), MsgProps,
-                  IsPersistent, IsDelivered == del} | Acc ];
+               [{MsgOrId, reconstruct_seq_id(StartSeg, RelSeq), MsgProps,
+                 IsPersistent, IsDelivered == del} | Acc];
            (_RelSeq, _Value, Acc) ->
                Acc
        end, Messages, Segment),
@@ -846,7 +929,11 @@ segment_entries_foldr(Fun, Init,
                       Segment = #segment { journal_entries = JEntries }) ->
     {SegEntries, _UnackedCount} = load_segment(false, Segment),
     {SegEntries1, _UnackedCountD} = segment_plus_journal(SegEntries, JEntries),
-    array:sparse_foldr(Fun, Init, SegEntries1).
+    array:sparse_foldr(
+      fun (RelSeq, {{IsPersistent, Bin, MsgBin}, Del, Ack}, Acc) ->
+              {MsgOrId, MsgProps} = parse_pub_record_body(Bin, MsgBin),
+              Fun(RelSeq, {{MsgOrId, MsgProps, IsPersistent}, Del, Ack}, Acc)
+      end, Init, SegEntries1).
 
 %% Loading segments
 %%
@@ -855,44 +942,48 @@ load_segment(KeepAcked, #segment { path = Path }) ->
     Empty = {array_new(), 0},
     case rabbit_file:is_file(Path) of
         false -> Empty;
-        true  -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []),
+        true  -> Size = rabbit_file:file_size(Path),
+                 file_handle_cache_stats:update(queue_index_read),
+                 {ok, Hdl} = file_handle_cache:open(Path, ?READ_MODE, []),
                  {ok, 0} = file_handle_cache:position(Hdl, bof),
-                 Res = case file_handle_cache:read(Hdl, ?SEGMENT_TOTAL_SIZE) of
-                           {ok, SegData} -> load_segment_entries(
-                                              KeepAcked, SegData, Empty);
-                           eof           -> Empty
-                       end,
+                 {ok, SegBin} = file_handle_cache:read(Hdl, Size),
                  ok = file_handle_cache:close(Hdl),
+                 Res = parse_segment_entries(SegBin, KeepAcked, Empty),
                  Res
     end.
 
-load_segment_entries(KeepAcked,
-                     <<?PUB_PREFIX:?PUB_PREFIX_BITS,
-                       IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
-                       PubRecordBody:?PUB_RECORD_BODY_BYTES/binary,
-                       SegData/binary>>,
-                     {SegEntries, UnackedCount}) ->
-    {MsgId, MsgProps} = parse_pub_record_body(PubRecordBody),
-    Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack},
+parse_segment_entries(<<?PUB_PREFIX:?PUB_PREFIX_BITS,
+                        IsPersistNum:1, RelSeq:?REL_SEQ_BITS, Rest/binary>>,
+                      KeepAcked, Acc) ->
+    parse_segment_publish_entry(
+      Rest, 1 == IsPersistNum, RelSeq, KeepAcked, Acc);
+parse_segment_entries(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+                       RelSeq:?REL_SEQ_BITS, Rest/binary>>, KeepAcked, Acc) ->
+    parse_segment_entries(
+      Rest, KeepAcked, add_segment_relseq_entry(KeepAcked, RelSeq, Acc));
+parse_segment_entries(<<>>, _KeepAcked, Acc) ->
+    Acc.
+
+parse_segment_publish_entry(<<Bin:?PUB_RECORD_BODY_BYTES/binary,
+                              MsgSize:?EMBEDDED_SIZE_BITS,
+                              MsgBin:MsgSize/binary, Rest/binary>>,
+                            IsPersistent, RelSeq, KeepAcked,
+                            {SegEntries, Unacked}) ->
+    Obj = {{IsPersistent, Bin, MsgBin}, no_del, no_ack},
     SegEntries1 = array:set(RelSeq, Obj, SegEntries),
-    load_segment_entries(KeepAcked, SegData, {SegEntries1, UnackedCount + 1});
-load_segment_entries(KeepAcked,
-                     <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
-                       RelSeq:?REL_SEQ_BITS, SegData/binary>>,
-                     {SegEntries, UnackedCount}) ->
-    {UnackedCountDelta, SegEntries1} =
-        case array:get(RelSeq, SegEntries) of
-            {Pub, no_del, no_ack} ->
-                { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)};
-            {Pub, del, no_ack} when KeepAcked ->
-                {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)};
-            {_Pub, del, no_ack} ->
-                {-1, array:reset(RelSeq, SegEntries)}
-        end,
-    load_segment_entries(KeepAcked, SegData,
-                         {SegEntries1, UnackedCount + UnackedCountDelta});
-load_segment_entries(_KeepAcked, _SegData, Res) ->
-    Res.
+    parse_segment_entries(Rest, KeepAcked, {SegEntries1, Unacked + 1});
+parse_segment_publish_entry(Rest, _IsPersistent, _RelSeq, KeepAcked, Acc) ->
+    parse_segment_entries(Rest, KeepAcked, Acc).
+
+add_segment_relseq_entry(KeepAcked, RelSeq, {SegEntries, Unacked}) ->
+    case array:get(RelSeq, SegEntries) of
+        {Pub, no_del, no_ack} ->
+            {array:set(RelSeq, {Pub, del, no_ack}, SegEntries), Unacked};
+        {Pub, del, no_ack} when KeepAcked ->
+            {array:set(RelSeq, {Pub, del, ack},    SegEntries), Unacked - 1};
+        {_Pub, del, no_ack} ->
+            {array:reset(RelSeq,                   SegEntries), Unacked - 1}
+    end.
 
 array_new() ->
     array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
@@ -1064,6 +1155,76 @@ avoid_zeroes_segment(<<0:?REL_SEQ_ONLY_PREFIX_BITS,
 avoid_zeroes_segment(_) ->
     stop.
 
+%% At upgrade time we just define every message's size as 0 - that
+%% will save us a load of faff with the message store, and means we
+%% can actually use the clean recovery terms in VQ. It does mean we
+%% don't count message bodies from before the migration, but we can
+%% live with that.
+store_msg_size() ->
+    foreach_queue_index({fun store_msg_size_journal/1,
+                         fun store_msg_size_segment/1}).
+
+store_msg_size_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Rest/binary>>) ->
+    {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_size_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                        Rest/binary>>) ->
+    {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_size_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                         MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS,
+                         Rest/binary>>) ->
+    {<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS, MsgId:?MSG_ID_BITS,
+       Expiry:?EXPIRY_BITS, 0:?SIZE_BITS>>, Rest};
+store_msg_size_journal(_) ->
+    stop.
+
+store_msg_size_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+                         RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+                         Expiry:?EXPIRY_BITS, Rest/binary>>) ->
+    {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+       MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, 0:?SIZE_BITS>>, Rest};
+store_msg_size_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+                        RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+    {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+     Rest};
+store_msg_size_segment(_) ->
+    stop.
+
+store_msg() ->
+    foreach_queue_index({fun store_msg_journal/1,
+                         fun store_msg_segment/1}).
+
+store_msg_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                    Rest/binary>>) ->
+    {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                    Rest/binary>>) ->
+    {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+                    MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+                    Rest/binary>>) ->
+    {<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS, MsgId:?MSG_ID_BITS,
+       Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+       0:?EMBEDDED_SIZE_BITS>>, Rest};
+store_msg_journal(_) ->
+    stop.
+
+store_msg_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+                    RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+                    Expiry:?EXPIRY_BITS, Size:?SIZE_BITS, Rest/binary>>) ->
+    {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+       MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+       0:?EMBEDDED_SIZE_BITS>>, Rest};
+store_msg_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+                    RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+    {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+     Rest};
+store_msg_segment(_) ->
+    stop.
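
Both upgrade steps above follow the same record-rewrite contract driven by foreach_queue_index/transform_file: each fun is handed the unread tail of a file and returns either {RewrittenRecord, Rest} or 'stop'. A stand-in driver (not part of the patch) over an in-memory binary, with a made-up 8-bit record format, to show how that contract composes:

    -module(transform_demo).
    -export([run/0]).

    %% Repeatedly apply Fun to the unread tail, collecting rewritten records,
    %% until Fun says 'stop' (end of input or an unrecognised record).
    transform(Fun, Bin) -> transform(Fun, Bin, []).

    transform(Fun, Bin, Acc) ->
        case Fun(Bin) of
            stop           -> list_to_binary(lists:reverse(Acc));
            {NewRec, Rest} -> transform(Fun, Rest, [NewRec | Acc])
        end.

    %% Rewrite every 8-bit record <<N>> as <<N, 0>>, i.e. append a zero field,
    %% much as store_msg_size/0 appends 0:?SIZE_BITS to pre-upgrade records.
    widen(<<N:8, Rest/binary>>) -> {<<N:8, 0:8>>, Rest};
    widen(<<>>)                 -> stop.

    run() ->
        <<1, 0, 2, 0, 3, 0>> = transform(fun widen/1, <<1, 2, 3>>),
        ok.
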
+
+
+
+
 %%----------------------------------------------------------------------------
 
 foreach_queue_index(Funs) ->
@@ -1099,7 +1260,7 @@ transform_file(Path, Fun) when is_function(Fun)->
                                            [{write_buffer, infinity}]),
 
                 {ok, PathHdl} = file_handle_cache:open(
-                                  Path, [{read_ahead, Size} | ?READ_MODE], []),
+                                  Path, ?READ_MODE, [{read_buffer, Size}]),
                 {ok, Content} = file_handle_cache:read(PathHdl, Size),
                 ok = file_handle_cache:close(PathHdl),
 
index 68cef56aff88f33cd578e4bf2d24602ae55a3968..d296c41344de29119439c5ac321106abb02820b7 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_reader).
@@ -27,7 +27,6 @@
 
 -export([conserve_resources/3, server_properties/1]).
 
--define(HANDSHAKE_TIMEOUT, 10).
 -define(NORMAL_TIMEOUT, 3).
 -define(CLOSING_TIMEOUT, 30).
 -define(CHANNEL_TERMINATION_TIMEOUT, 3).
@@ -43,7 +42,7 @@
 -record(connection, {name, host, peer_host, port, peer_port,
                      protocol, user, timeout_sec, frame_max, channel_max, vhost,
                      client_properties, capabilities,
-                     auth_mechanism, auth_state}).
+                     auth_mechanism, auth_state, connected_at}).
 
 -record(throttle, {alarmed_by, last_blocked_by, last_blocked_at}).
 
         peer_host, ssl, peer_cert_subject, peer_cert_issuer,
         peer_cert_validity, auth_mechanism, ssl_protocol,
         ssl_key_exchange, ssl_cipher, ssl_hash, protocol, user, vhost,
-        timeout, frame_max, channel_max, client_properties]).
+        timeout, frame_max, channel_max, client_properties, connected_at]).
 
 -define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
 
+-define(AUTH_NOTIFICATION_INFO_KEYS,
+        [host, vhost, name, peer_host, peer_port, protocol, auth_mechanism,
+         ssl, ssl_protocol, ssl_cipher, peer_cert_issuer, peer_cert_subject,
+         peer_cert_validity]).
+
 -define(IS_RUNNING(State),
         (State#v1.connection_state =:= running orelse
          State#v1.connection_state =:= blocking orelse
@@ -189,10 +193,20 @@ server_capabilities(_) ->
 log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).
 
 socket_error(Reason) when is_atom(Reason) ->
-    log(error, "error on AMQP connection ~p: ~s~n",
+    log(error, "Error on AMQP connection ~p: ~s~n",
         [self(), rabbit_misc:format_inet_error(Reason)]);
 socket_error(Reason) ->
-    log(error, "error on AMQP connection ~p:~n~p~n", [self(), Reason]).
+    Level =
+        case Reason of
+            {ssl_upgrade_error, closed} ->
+                %% The socket was closed while upgrading to SSL.
+                %% This is presumably a TCP healthcheck, so don't log
+                %% it unless specified otherwise.
+                debug;
+            _ ->
+                error
+        end,
+    log(Level, "Error on AMQP connection ~p:~n~p~n", [self(), Reason]).
 
 inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
 
@@ -215,9 +229,9 @@ start_connection(Parent, HelperSup, Deb, Sock, SockTransform) ->
                                     rabbit_net:fast_close(Sock),
                                     exit(normal)
            end,
-    log(info, "accepting AMQP connection ~p (~s)~n", [self(), Name]),
+    {ok, HandshakeTimeout} = application:get_env(rabbit, handshake_timeout),
     ClientSock = socket_op(Sock, SockTransform),
-    erlang:send_after(?HANDSHAKE_TIMEOUT * 1000, self(), handshake_timeout),
+    erlang:send_after(HandshakeTimeout, self(), handshake_timeout),
     {PeerHost, PeerPort, Host, Port} =
         socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
     ?store_proc_name(list_to_binary(Name)),
@@ -231,13 +245,14 @@ start_connection(Parent, HelperSup, Deb, Sock, SockTransform) ->
                   peer_port          = PeerPort,
                   protocol           = none,
                   user               = none,
-                  timeout_sec        = ?HANDSHAKE_TIMEOUT,
+                  timeout_sec        = (HandshakeTimeout / 1000),
                   frame_max          = ?FRAME_MIN_SIZE,
                   vhost              = none,
                   client_properties  = none,
                   capabilities       = [],
                   auth_mechanism     = none,
-                  auth_state         = none},
+                  auth_state         = none,
+                  connected_at       = rabbit_misc:now_to_ms(os:timestamp())},
                 callback            = uninitialized_callback,
                 recv_len            = 0,
                 pending_recv        = false,
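
With the hard-coded 10-second constant gone, the handshake timeout is read from the rabbit application environment in milliseconds, so it can be tuned from the configuration file. A hedged example (the 10000 ms default is an assumption based on the old constant):

    %% rabbitmq.config: raise the AMQP handshake timeout from the assumed
    %% 10000 ms default to 20 seconds.
    [
      {rabbit, [
        {handshake_timeout, 20000}
      ]}
    ].
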
@@ -258,11 +273,8 @@ start_connection(Parent, HelperSup, Deb, Sock, SockTransform) ->
                                           handshake, 8)]}),
         log(info, "closing AMQP connection ~p (~s)~n", [self(), Name])
     catch
-        Ex -> log(case Ex of
-                      connection_closed_abruptly -> warning;
-                      _                          -> error
-                  end, "closing AMQP connection ~p (~s):~n~p~n",
-                  [self(), Name, Ex])
+        Ex ->
+          log_connection_exception(Name, Ex)
     after
         %% We don't call gen_tcp:close/1 here since it waits for
         %% pending output to be sent, which results in unnecessary
@@ -277,6 +289,22 @@ start_connection(Parent, HelperSup, Deb, Sock, SockTransform) ->
     end,
     done.
 
+log_connection_exception(Name, Ex) ->
+  Severity = case Ex of
+      connection_closed_with_no_data_received -> debug;
+      connection_closed_abruptly              -> warning;
+      _                                       -> error
+    end,
+  log_connection_exception(Severity, Name, Ex).
+
+log_connection_exception(Severity, Name, {heartbeat_timeout, TimeoutSec}) ->
+  %% Long line to avoid extra spaces and line breaks in log
+  log(Severity, "closing AMQP connection ~p (~s):~nMissed heartbeats from client, timeout: ~ps~n",
+    [self(), Name, TimeoutSec]);
+log_connection_exception(Severity, Name, Ex) ->
+  log(Severity, "closing AMQP connection ~p (~s):~n~p~n",
+    [self(), Name, Ex]).
+
 run({M, F, A}) ->
     try apply(M, F, A)
     catch {become, MFA} -> run(MFA)
@@ -312,13 +340,35 @@ binlist_split(Len, L, [Acc0|Acc]) when Len < 0 ->
 binlist_split(Len, [H|T], Acc) ->
     binlist_split(Len - size(H), T, [H|Acc]).
 
-mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock}) ->
-    case rabbit_net:recv(Sock) of
+mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock,
+                                       connection_state = CS,
+                                       connection = #connection{
+                                         name = ConnName}}) ->
+    Recv = rabbit_net:recv(Sock),
+    case CS of
+        pre_init when Buf =:= [] ->
+            %% We only log incoming connections when either the
+            %% first byte was received or there was an error (eg. a
+            %% timeout).
+            %%
+            %% The goal is to not log TCP healthchecks (a connection
+            %% with no data received) unless specified otherwise.
+            log(case Recv of
+                  closed -> debug;
+                  _      -> info
+                end, "accepting AMQP connection ~p (~s)~n",
+                [self(), ConnName]);
+        _ ->
+            ok
+    end,
+    case Recv of
         {data, Data} ->
             recvloop(Deb, [Data | Buf], BufLen + size(Data),
                      State#v1{pending_recv = false});
         closed when State#v1.connection_state =:= closed ->
             ok;
+        closed when CS =:= pre_init andalso Buf =:= [] ->
+            stop(tcp_healthcheck, State);
         closed ->
             stop(closed, State);
         {error, Reason} ->
@@ -333,10 +383,18 @@ mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock}) ->
             end
     end.
 
-stop(closed, State) -> maybe_emit_stats(State),
-                       throw(connection_closed_abruptly);
-stop(Reason, State) -> maybe_emit_stats(State),
-                       throw({inet_error, Reason}).
+stop(tcp_healthcheck, State) ->
+    %% The connection was closed before any packet was received. It's
+    %% probably a load-balancer healthcheck: don't consider this a
+    %% failure.
+    maybe_emit_stats(State),
+    throw(connection_closed_with_no_data_received);
+stop(closed, State) ->
+    maybe_emit_stats(State),
+    throw(connection_closed_abruptly);
+stop(Reason, State) ->
+    maybe_emit_stats(State),
+    throw({inet_error, Reason}).
 
 handle_other({conserve_resources, Source, Conserve},
              State = #v1{throttle = Throttle = #throttle{alarmed_by = CR}}) ->
@@ -387,9 +445,10 @@ handle_other(handshake_timeout, State) ->
     throw({handshake_timeout, State#v1.callback});
 handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
     State;
-handle_other(heartbeat_timeout, State = #v1{connection_state = S}) ->
+handle_other(heartbeat_timeout,
+             State = #v1{connection = #connection{timeout_sec = T}}) ->
     maybe_emit_stats(State),
-    throw({heartbeat_timeout, S});
+    throw({heartbeat_timeout, T});
 handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
     {ForceTermination, NewState} = terminate(Explanation, State),
     gen_server:reply(From, ok),
@@ -410,8 +469,8 @@ handle_other({'$gen_cast', {force_event_refresh, Ref}}, State)
     rabbit_event:notify(
       connection_created,
       [{type, network} | infos(?CREATION_EVENT_KEYS, State)], Ref),
-    State;
-handle_other({'$gen_cast', force_event_refresh}, State) ->
+    rabbit_event:init_stats_timer(State, #v1.stats_timer);
+handle_other({'$gen_cast', {force_event_refresh, _Ref}}, State) ->
     %% Ignore, we will emit a created event once we start running.
     State;
 handle_other(ensure_stats, State) ->
@@ -548,21 +607,27 @@ wait_for_channel_termination(0, TimerRef, State) ->
                  end;
         _     -> State
     end;
-wait_for_channel_termination(N, TimerRef, State) ->
+wait_for_channel_termination(N, TimerRef,
+                             State = #v1{connection_state = CS,
+                                         connection = #connection{
+                                                         name  = ConnName,
+                                                         user  = User,
+                                                         vhost = VHost}}) ->
     receive
         {'DOWN', _MRef, process, ChPid, Reason} ->
             {Channel, State1} = channel_cleanup(ChPid, State),
             case {Channel, termination_kind(Reason)} of
-                {undefined,    _} -> exit({abnormal_dependent_exit,
-                                           ChPid, Reason});
-                {_,   controlled} -> wait_for_channel_termination(
-                                       N-1, TimerRef, State1);
-                {_, uncontrolled} -> log(error,
-                                         "AMQP connection ~p, channel ~p - "
-                                         "error while terminating:~n~p~n",
-                                         [self(), Channel, Reason]),
-                                     wait_for_channel_termination(
-                                       N-1, TimerRef, State1)
+                {undefined,    _} ->
+                    exit({abnormal_dependent_exit, ChPid, Reason});
+                {_,   controlled} ->
+                    wait_for_channel_termination(N-1, TimerRef, State1);
+                {_, uncontrolled} ->
+                    log(error, "Error on AMQP connection ~p (~s, vhost: '~s',"
+                               " user: '~s', state: ~p), channel ~p:"
+                               " error while terminating:~n~p~n",
+                        [self(), ConnName, VHost, User#user.username,
+                         CS, Channel, Reason]),
+                    wait_for_channel_termination(N-1, TimerRef, State1)
             end;
         cancel_wait ->
             exit(channel_termination_timeout)
@@ -581,16 +646,24 @@ maybe_close(State) ->
 termination_kind(normal) -> controlled;
 termination_kind(_)      -> uncontrolled.
 
+log_hard_error(#v1{connection_state = CS,
+                   connection = #connection{
+                                   name  = ConnName,
+                                   user  = User,
+                                   vhost = VHost}}, Channel, Reason) ->
+    log(error,
+        "Error on AMQP connection ~p (~s, vhost: '~s',"
+        " user: '~s', state: ~p), channel ~p:~n~p~n",
+        [self(), ConnName, VHost, User#user.username, CS, Channel, Reason]).
+
 handle_exception(State = #v1{connection_state = closed}, Channel, Reason) ->
-    log(error, "AMQP connection ~p (~p), channel ~p - error:~n~p~n",
-        [self(), closed, Channel, Reason]),
+    log_hard_error(State, Channel, Reason),
     State;
 handle_exception(State = #v1{connection = #connection{protocol = Protocol},
                              connection_state = CS},
                  Channel, Reason)
   when ?IS_RUNNING(State) orelse CS =:= closing ->
-    log(error, "AMQP connection ~p (~p), channel ~p - error:~n~p~n",
-        [self(), CS, Channel, Reason]),
+    log_hard_error(State, Channel, Reason),
     {0, CloseMethod} =
         rabbit_binary_generator:map_exception(Channel, Reason, Protocol),
     State1 = close_connection(terminate_channels(State)),
@@ -858,7 +931,7 @@ handle_method0(MethodName, FieldsBin,
     try
         handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin),
                        State)
-    catch throw:{inet_error, closed} ->
+    catch throw:{inet_error, E} when E =:= closed; E =:= enotconn ->
             maybe_emit_stats(State),
             throw(connection_closed_abruptly);
           exit:#amqp_error{method = none} = Reason ->
@@ -929,7 +1002,7 @@ handle_method0(#'connection.open'{virtual_host = VHostPath},
                            helper_sup       = SupPid,
                            sock             = Sock,
                            throttle         = Throttle}) ->
-    ok = rabbit_access_control:check_vhost_access(User, VHostPath),
+    ok = rabbit_access_control:check_vhost_access(User, VHostPath, Sock),
     NewConnection = Connection#connection{vhost = VHostPath},
     ok = send_on_channel0(Sock, #'connection.open_ok'{}, Protocol),
     Conserve = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
@@ -1031,9 +1104,12 @@ auth_phase(Response,
                                        auth_state     = AuthState},
                        sock = Sock}) ->
     case AuthMechanism:handle_response(Response, AuthState) of
-        {refused, Msg, Args} ->
-            auth_fail(Msg, Args, Name, State);
+        {refused, Username, Msg, Args} ->
+            auth_fail(Username, Msg, Args, Name, State);
         {protocol_error, Msg, Args} ->
+            notify_auth_result(none, user_authentication_failure,
+                               [{error, rabbit_misc:format(Msg, Args)}],
+                               State),
             rabbit_misc:protocol_error(syntax_error, Msg, Args);
         {challenge, Challenge, AuthState1} ->
             Secure = #'connection.secure'{challenge = Challenge},
@@ -1042,9 +1118,12 @@ auth_phase(Response,
                                     auth_state = AuthState1}};
         {ok, User = #user{username = Username}} ->
             case rabbit_access_control:check_user_loopback(Username, Sock) of
-                ok          -> ok;
-                not_allowed -> auth_fail("user '~s' can only connect via "
-                                         "localhost", [Username], Name, State)
+                ok ->
+                    notify_auth_result(Username, user_authentication_success,
+                                       [], State);
+                not_allowed ->
+                    auth_fail(Username, "user '~s' can only connect via "
+                              "localhost", [Username], Name, State)
             end,
             Tune = #'connection.tune'{frame_max   = get_env(frame_max),
                                       channel_max = get_env(channel_max),
@@ -1056,11 +1135,15 @@ auth_phase(Response,
     end.
 
 -ifdef(use_specs).
--spec(auth_fail/4 :: (string(), [any()], binary(), #v1{}) -> no_return()).
+-spec(auth_fail/5 ::
+        (rabbit_types:username() | none, string(), [any()], binary(), #v1{}) ->
+           no_return()).
 -endif.
-auth_fail(Msg, Args, AuthName,
+auth_fail(Username, Msg, Args, AuthName,
           State = #v1{connection = #connection{protocol     = Protocol,
                                                capabilities = Capabilities}}) ->
+    notify_auth_result(Username, user_authentication_failure,
+      [{error, rabbit_misc:format(Msg, Args)}], State),
     AmqpError = rabbit_misc:amqp_error(
                   access_refused, "~s login refused: ~s",
                   [AuthName, io_lib:format(Msg, Args)], none),
@@ -1079,6 +1162,16 @@ auth_fail(Msg, Args, AuthName,
     end,
     rabbit_misc:protocol_error(AmqpError).
 
+notify_auth_result(Username, AuthResult, ExtraProps, State) ->
+    EventProps = [{connection_type, network},
+                  {name, case Username of none -> ''; _ -> Username end}] ++
+                 [case Item of
+                      name -> {connection_name, i(name, State)};
+                      _    -> {Item, i(Item, State)}
+                  end || Item <- ?AUTH_NOTIFICATION_INFO_KEYS] ++
+                 ExtraProps,
+    rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+
 %%--------------------------------------------------------------------------
 
 infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
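notify_auth_result/4 turns every authentication attempt into a rabbit_event notification, user_authentication_success or user_authentication_failure, carrying the connection items listed in ?AUTH_NOTIFICATION_INFO_KEYS plus an error string on failure (empty values are filtered out). Any gen_event handler attached to the rabbit_event manager can observe these. Below is a minimal sketch of such a handler; it assumes the manager is registered locally as rabbit_event (as in stock RabbitMQ), the module name is invented for illustration, and a real handler would include rabbit.hrl and match on #event{type = user_authentication_failure} instead of printing everything.

-module(auth_event_probe).
-behaviour(gen_event).

-export([attach/0]).
-export([init/1, handle_event/2, handle_call/2, handle_info/2,
         terminate/2, code_change/3]).

%% attach this handler to the broker's event manager
attach() -> gen_event:add_handler(rabbit_event, ?MODULE, []).

init([]) -> {ok, no_state}.

%% every broker event arrives here as an #event{} record (see rabbit.hrl);
%% this sketch just prints them, a real handler would pattern-match the type
handle_event(Event, State) ->
    io:format("rabbit event: ~p~n", [Event]),
    {ok, State}.

handle_call(_Request, State)        -> {ok, ok, State}.
handle_info(_Info, State)           -> {ok, State}.
terminate(_Arg, _State)             -> ok.
code_change(_OldVsn, State, _Extra) -> {ok, State}.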
@@ -1130,6 +1223,7 @@ ic(channel_max,       #connection{channel_max = ChMax})    -> ChMax;
 ic(client_properties, #connection{client_properties = CP}) -> CP;
 ic(auth_mechanism,    #connection{auth_mechanism = none})  -> none;
 ic(auth_mechanism,    #connection{auth_mechanism = {Name, _Mod}}) -> Name;
+ic(connected_at,      #connection{connected_at = T}) -> T;
 ic(Item,              #connection{}) -> throw({bad_argument, Item}).
 
 socket_info(Get, Select, #v1{sock = Sock}) ->
@@ -1151,9 +1245,9 @@ ssl_info(F, #v1{sock = Sock}) ->
 
 cert_info(F, #v1{sock = Sock}) ->
     case rabbit_net:peercert(Sock) of
-        nossl                -> '';
-        {error, no_peercert} -> '';
-        {ok, Cert}           -> list_to_binary(F(Cert))
+        nossl      -> '';
+        {error, _} -> '';
+        {ok, Cert} -> list_to_binary(F(Cert))
     end.
 
 maybe_emit_stats(State) ->
index bbf38f58c162053514860fc0ee2cbd266ca68458..114029651b0e83463e1d082ab737d259a654cba2 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% We use a gen_server simply so that during the terminate/2 call
 
 -export([start/0, stop/0, store/2, read/1, clear/0]).
 
--export([upgrade_recovery_terms/0, start_link/0]).
+-export([start_link/0]).
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
          terminate/2, code_change/3]).
 
+-export([upgrade_recovery_terms/0, persistent_bytes/0]).
+
 -rabbit_upgrade({upgrade_recovery_terms, local, []}).
+-rabbit_upgrade({persistent_bytes, local, [upgrade_recovery_terms]}).
 
 %%----------------------------------------------------------------------------
 
@@ -58,9 +61,11 @@ read(DirBaseName) ->
     end.
 
 clear() ->
-    dets:delete_all_objects(?MODULE),
+    ok = dets:delete_all_objects(?MODULE),
     flush().
 
+start_link() -> gen_server:start_link(?MODULE, [], []).
+
 %%----------------------------------------------------------------------------
 
 upgrade_recovery_terms() ->
@@ -84,7 +89,20 @@ upgrade_recovery_terms() ->
         close_table()
     end.
 
-start_link() -> gen_server:start_link(?MODULE, [], []).
+persistent_bytes()      -> dets_upgrade(fun persistent_bytes/1).
+persistent_bytes(Props) -> Props ++ [{persistent_bytes, 0}].
+
+dets_upgrade(Fun) ->
+    open_table(),
+    try
+        ok = dets:foldl(fun ({DirBaseName, Terms}, Acc) ->
+                                store(DirBaseName, Fun(Terms)),
+                                Acc
+                        end, ok, ?MODULE),
+        ok
+    after
+        close_table()
+    end.
 
 %%----------------------------------------------------------------------------
 
@@ -113,9 +131,8 @@ open_table() ->
                                        {ram_file,  true},
                                        {auto_save, infinity}]).
 
-flush() -> dets:sync(?MODULE).
+flush() -> ok = dets:sync(?MODULE).
 
 close_table() ->
     ok = flush(),
     ok = dets:close(?MODULE).
-
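persistent_bytes/0 is a new 'local' upgrade step declared to run after upgrade_recovery_terms, and dets_upgrade/1 is the shared helper it uses: open the recovery-terms table, fold over every {DirBaseName, Terms} entry, and store the entry back with the new default appended ({persistent_bytes, 0}). The standalone sketch below reproduces that fold-and-rewrite pattern against a throwaway dets table so the shape of the data is visible; the table name and file path are made up, and nothing here touches RabbitMQ's own state.

-module(dets_upgrade_sketch).
-export([run/0]).

run() ->
    {ok, T} = dets:open_file(sketch_terms, [{file, "/tmp/sketch_terms.dets"}]),
    ok = dets:insert(T, {"queue-a", [{persistent_ref, <<"ref-a">>}]}),
    %% fold over every entry and write it back with the new key appended,
    %% mirroring what dets_upgrade/1 does via store/2
    ok = dets:foldl(fun ({Key, Terms}, ok) ->
                            ok = dets:insert(T, {Key, Terms ++ [{persistent_bytes, 0}]}),
                            ok
                    end, ok, T),
    [{_, Upgraded}] = dets:lookup(T, "queue-a"),
    ok = dets:close(T),
    %% Upgraded =:= [{persistent_ref,<<"ref-a">>},{persistent_bytes,0}]
    Upgraded.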
index ad8d0d020b744438cb5238edb5123b32954815db..fc016e718ea1d8c4acb5816cd696728cb5e576e3 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_registry).
index 3366bad76e31409c970fea045736de5121bbe274..516eea91e94e7c8db6f6b69fef8cdf2986a1f3ea 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_restartable_sup).
index fca01759aa523dcdb7e2e2b5c60021db4d09fbeb..9692167c6d90bdff61c59ec07614f71b908c1053 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_router).
index 3e81ea74035dc874a27c6858c36e246c56d72daa..1d4bc0b5753fbfb3efc41d536e507e64ffa0635f 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_runtime_parameter).
@@ -32,7 +32,7 @@
 
 behaviour_info(callbacks) ->
     [
-     {validate, 4},
+     {validate, 5},
      {notify, 4},
      {notify_clear, 3}
     ];
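The runtime parameter behaviour now requires validate/5 instead of validate/4: validators receive the requesting user (a #user{} record, or none for internal calls) as the last argument, which is what the deleted rabbit_runtime_parameters_test module further down exercises with its <<"admin">> parameter. A minimal sketch of a callback module written against the new shape follows; the module name and component name are invented for illustration only.

-module(my_param_validator).
-behaviour(rabbit_runtime_parameter).

-export([validate/5, notify/4, notify_clear/3]).

%% validate(VHost, Component, Name, Term, User) where User is a #user{} or 'none'
validate(_VHost, <<"my-component">>, _Name, _Term, _User) -> ok;
validate(_VHost, _Component, _Name, _Term, _User) ->
    {error, "unexpected component for this validator", []}.

notify(_VHost, _Component, _Name, _Term) -> ok.

notify_clear(_VHost, _Component, _Name) -> ok.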
index f78549ffcf17add979bde5e0c5b85e41945fe3df..fafd598bb731ebe013127d90233bba27b0ba540a 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_runtime_parameters).
diff --git a/rabbitmq-server/src/rabbit_runtime_parameters_test.erl b/rabbitmq-server/src/rabbit_runtime_parameters_test.erl
deleted file mode 100644 (file)
index 2e69424..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_runtime_parameters_test).
--behaviour(rabbit_runtime_parameter).
--behaviour(rabbit_policy_validator).
-
--include("rabbit.hrl").
-
--export([validate/5, notify/4, notify_clear/3]).
--export([register/0, unregister/0]).
--export([validate_policy/1]).
--export([register_policy_validator/0, unregister_policy_validator/0]).
-
-%----------------------------------------------------------------------------
-
-register() ->
-    rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
-
-unregister() ->
-    rabbit_registry:unregister(runtime_parameter, <<"test">>).
-
-validate(_, <<"test">>, <<"good">>,  _Term, _User)      -> ok;
-validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
-validate(_, <<"test">>, <<"admin">>, _Term, none)       -> ok;
-validate(_, <<"test">>, <<"admin">>, _Term, User) ->
-    case lists:member(administrator, User#user.tags) of
-        true  -> ok;
-        false -> {error, "meh", []}
-    end;
-validate(_, <<"test">>, _, _, _)                        -> {error, "meh", []}.
-
-notify(_, _, _, _) -> ok.
-notify_clear(_, _, _) -> ok.
-
-%----------------------------------------------------------------------------
-
-register_policy_validator() ->
-    rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
-    rabbit_registry:register(policy_validator, <<"testpos">>,  ?MODULE).
-
-unregister_policy_validator() ->
-    rabbit_registry:unregister(policy_validator, <<"testeven">>),
-    rabbit_registry:unregister(policy_validator, <<"testpos">>).
-
-validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
-    case  length(Terms) rem 2 =:= 0 of
-        true  -> ok;
-        false -> {error, "meh", []}
-    end;
-
-validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
-    case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
-        true  -> ok;
-        false -> {error, "meh", []}
-    end;
-
-validate_policy(_) ->
-    {error, "meh", []}.
index 2dd16702b69ebcfc4085cf2d8502126dcf822f8d..c6000a268ab99928a9db904046c2dcedd878beaa 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_sasl_report_file_h).
index bd5dcf070b9f2322f7cb6048cd69c445ed36edc3..c07a913723ccf10b9702d1f4bb337dc4743cd25c 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_ssl).
index c90bb94c8bdffb2f6aa466b05692bdc668946586..537ff8d6601788543b391e0479e1dc1b40a604b4 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_sup).
index da75932d0e8162acdbb4f85f4f9a897ad90a27f2..e716345b85277f5a8f2d247b4034fce326f82640 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_table).
 
 -export([create/0, create_local_copy/1, wait_for_replicated/0, wait/1,
-         force_load/0, is_present/0, is_empty/0,
-         check_schema_integrity/0, clear_ram_only_tables/0]).
+         force_load/0, is_present/0, is_empty/0, needs_default_data/0,
+         check_schema_integrity/0, clear_ram_only_tables/0, wait_timeout/0]).
 
 -include("rabbit.hrl").
 
 -spec(create_local_copy/1 :: ('disc' | 'ram') -> 'ok').
 -spec(wait_for_replicated/0 :: () -> 'ok').
 -spec(wait/1 :: ([atom()]) -> 'ok').
+-spec(wait_timeout/0 :: () -> non_neg_integer() | infinity).
 -spec(force_load/0 :: () -> 'ok').
 -spec(is_present/0 :: () -> boolean()).
 -spec(is_empty/0 :: () -> boolean()).
+-spec(needs_default_data/0 :: () -> boolean()).
 -spec(check_schema_integrity/0 :: () -> rabbit_types:ok_or_error(any())).
 -spec(clear_ram_only_tables/0 :: () -> 'ok').
 
@@ -70,7 +72,10 @@ wait_for_replicated() ->
                  not lists:member({local_content, true}, TabDef)]).
 
 wait(TableNames) ->
-    case mnesia:wait_for_tables(TableNames, 30000) of
+    %% We might be in ctl here for offline ops, in which case we can't
+    %% get_env() for the rabbit app.
+    Timeout = wait_timeout(),
+    case mnesia:wait_for_tables(TableNames, Timeout) of
         ok ->
             ok;
         {timeout, BadTabs} ->
@@ -79,13 +84,23 @@ wait(TableNames) ->
             throw({error, {failed_waiting_for_tables, Reason}})
     end.
 
+wait_timeout() ->
+    case application:get_env(rabbit, mnesia_table_loading_timeout) of
+        {ok, T}   -> T;
+        undefined -> 30000
+    end.
+
 force_load() -> [mnesia:force_load_table(T) || T <- names()], ok.
 
 is_present() -> names() -- mnesia:system_info(tables) =:= [].
 
-is_empty() ->
+is_empty()           -> is_empty(names()).
+needs_default_data() -> is_empty([rabbit_user, rabbit_user_permission,
+                                  rabbit_vhost]).
+
+is_empty(Names) ->
     lists:all(fun (Tab) -> mnesia:dirty_first(Tab) == '$end_of_table' end,
-              names()).
+              Names).
 
 check_schema_integrity() ->
     Tables = mnesia:system_info(tables),
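wait/1 now takes its timeout from wait_timeout/0, which reads mnesia_table_loading_timeout from the rabbit application environment and falls back to the previous hard-coded 30000 ms when the key is unset (for example when invoked from rabbitmqctl for offline operations, where the rabbit app environment is not loaded). Assuming the standard rabbitmq.config Erlang-terms format, a node that needs longer than 30 seconds to load its Mnesia tables could raise the limit like this (value in milliseconds):

%% rabbitmq.config
[
 {rabbit, [
            {mnesia_table_loading_timeout, 120000}
          ]}
].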
diff --git a/rabbitmq-server/src/rabbit_tests.erl b/rabbitmq-server/src/rabbit_tests.erl
deleted file mode 100644 (file)
index da6938b..0000000
+++ /dev/null
@@ -1,2987 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_tests).
-
--compile([export_all]).
-
--export([all_tests/0]).
-
--import(rabbit_misc, [pget/2]).
-
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
--include_lib("kernel/include/file.hrl").
-
--define(PERSISTENT_MSG_STORE, msg_store_persistent).
--define(TRANSIENT_MSG_STORE,  msg_store_transient).
--define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
--define(TIMEOUT, 5000).
-
-all_tests() ->
-    ok = setup_cluster(),
-    ok = truncate:test(),
-    ok = supervisor2_tests:test_all(),
-    passed = gm_tests:all_tests(),
-    passed = mirrored_supervisor_tests:all_tests(),
-    application:set_env(rabbit, file_handles_high_watermark, 10, infinity),
-    ok = file_handle_cache:set_limit(10),
-    passed = test_version_equivalance(),
-    passed = test_file_handle_cache(),
-    passed = test_backing_queue(),
-    passed = test_rabbit_basic_header_handling(),
-    passed = test_priority_queue(),
-    passed = test_pg_local(),
-    passed = test_unfold(),
-    passed = test_supervisor_delayed_restart(),
-    passed = test_table_codec(),
-    passed = test_content_framing(),
-    passed = test_content_transcoding(),
-    passed = test_topic_matching(),
-    passed = test_log_management(),
-    passed = test_app_management(),
-    passed = test_log_management_during_startup(),
-    passed = test_statistics(),
-    passed = test_arguments_parser(),
-    passed = test_dynamic_mirroring(),
-    passed = test_user_management(),
-    passed = test_runtime_parameters(),
-    passed = test_policy_validation(),
-    passed = test_policy_opts_validation(),
-    passed = test_ha_policy_validation(),
-    passed = test_server_status(),
-    passed = test_amqp_connection_refusal(),
-    passed = test_confirms(),
-    passed = test_with_state(),
-    passed = test_mcall(),
-    passed =
-        do_if_secondary_node(
-          fun run_cluster_dependent_tests/1,
-          fun (SecondaryNode) ->
-                  io:format("Skipping cluster dependent tests with node ~p~n",
-                            [SecondaryNode]),
-                  passed
-          end),
-    passed = test_configurable_server_properties(),
-    passed = vm_memory_monitor_tests:all_tests(),
-    passed.
-
-
-do_if_secondary_node(Up, Down) ->
-    SecondaryNode = rabbit_nodes:make("hare"),
-
-    case net_adm:ping(SecondaryNode) of
-        pong -> Up(SecondaryNode);
-        pang -> Down(SecondaryNode)
-    end.
-
-setup_cluster() ->
-    do_if_secondary_node(
-      fun (SecondaryNode) ->
-              ok = control_action(stop_app, []),
-              ok = control_action(join_cluster,
-                                  [atom_to_list(SecondaryNode)]),
-              ok = control_action(start_app, []),
-              ok = control_action(start_app, SecondaryNode, [], [])
-      end,
-      fun (_) -> ok end).
-
-maybe_run_cluster_dependent_tests() ->
-    do_if_secondary_node(
-      fun (SecondaryNode) ->
-              passed = run_cluster_dependent_tests(SecondaryNode)
-      end,
-      fun (SecondaryNode) ->
-              io:format("Skipping cluster dependent tests with node ~p~n",
-                        [SecondaryNode])
-      end).
-
-run_cluster_dependent_tests(SecondaryNode) ->
-    io:format("Running cluster dependent tests with node ~p~n", [SecondaryNode]),
-    passed = test_delegates_async(SecondaryNode),
-    passed = test_delegates_sync(SecondaryNode),
-    passed = test_queue_cleanup(SecondaryNode),
-    passed = test_declare_on_dead_queue(SecondaryNode),
-    passed = test_refresh_events(SecondaryNode),
-
-    %% we now run the tests remotely, so that code coverage on the
-    %% local node picks up more of the delegate
-    Node = node(),
-    Self = self(),
-    Remote = spawn(SecondaryNode,
-                   fun () -> Rs = [ test_delegates_async(Node),
-                                    test_delegates_sync(Node),
-                                    test_queue_cleanup(Node),
-                                    test_declare_on_dead_queue(Node),
-                                    test_refresh_events(Node) ],
-                             Self ! {self(), Rs}
-                   end),
-    receive
-        {Remote, Result} ->
-            Result = lists:duplicate(length(Result), passed)
-    after 30000 ->
-            throw(timeout)
-    end,
-
-    passed.
-
-test_version_equivalance() ->
-    true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"),
-    true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"),
-    true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"),
-    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"),
-    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"),
-    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"),
-    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"),
-    passed.
-
-test_rabbit_basic_header_handling() ->
-    passed = write_table_with_invalid_existing_type_test(),
-    passed = invalid_existing_headers_test(),
-    passed = disparate_invalid_header_entries_accumulate_separately_test(),
-    passed = corrupt_or_invalid_headers_are_overwritten_test(),
-    passed = invalid_same_header_entry_accumulation_test(),
-    passed.
-
--define(XDEATH_TABLE,
-        [{<<"reason">>,       longstr,   <<"blah">>},
-         {<<"queue">>,        longstr,   <<"foo.bar.baz">>},
-         {<<"exchange">>,     longstr,   <<"my-exchange">>},
-         {<<"routing-keys">>, array,     []}]).
-
--define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).
-
--define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
--define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
--define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
-
-write_table_with_invalid_existing_type_test() ->
-    prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]),
-    passed.
-
-invalid_existing_headers_test() ->
-    Headers =
-        prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
-    {array, [{table, ?ROUTE_TABLE}]} =
-        rabbit_misc:table_lookup(Headers, <<"header2">>),
-    passed.
-
-disparate_invalid_header_entries_accumulate_separately_test() ->
-    BadHeaders = [?BAD_HEADER("header2")],
-    Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
-    Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
-                             [?BAD_HEADER("header1") | Headers]),
-    {table, [?FOUND_BAD_HEADER("header1"),
-             ?FOUND_BAD_HEADER("header2")]} =
-        rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
-    passed.
-
-corrupt_or_invalid_headers_are_overwritten_test() ->
-    Headers0 = [?BAD_HEADER("header1"),
-                ?BAD_HEADER("x-invalid-headers")],
-    Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
-    {table,[?FOUND_BAD_HEADER("header1"),
-            ?FOUND_BAD_HEADER("x-invalid-headers")]} =
-        rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
-    passed.
-
-invalid_same_header_entry_accumulation_test() ->
-    BadHeader1 = ?BAD_HEADER2("header1", "a"),
-    Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
-    Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
-                             [?BAD_HEADER2("header1", "b") | Headers]),
-    {table, InvalidHeaders} =
-        rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
-    {array, [{longstr,<<"bad header1b">>},
-             {longstr,<<"bad header1a">>}]} =
-        rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
-    passed.
-
-prepend_check(HeaderKey, HeaderTable, Headers) ->
-    Headers1 = rabbit_basic:prepend_table_header(
-                HeaderKey, HeaderTable, Headers),
-    {table, Invalid} =
-        rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
-    {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
-    {array, [{Type, Value} | _]} =
-        rabbit_misc:table_lookup(Invalid, HeaderKey),
-    Headers1.
-
-test_priority_queue() ->
-
-    false = priority_queue:is_queue(not_a_queue),
-
-    %% empty Q
-    Q = priority_queue:new(),
-    {true, true, 0, [], []} = test_priority_queue(Q),
-
-    %% 1-4 element no-priority Q
-    true = lists:all(fun (X) -> X =:= passed end,
-                     lists:map(fun test_simple_n_element_queue/1,
-                               lists:seq(1, 4))),
-
-    %% 1-element priority Q
-    Q1 = priority_queue:in(foo, 1, priority_queue:new()),
-    {true, false, 1, [{1, foo}], [foo]} =
-        test_priority_queue(Q1),
-
-    %% 2-element same-priority Q
-    Q2 = priority_queue:in(bar, 1, Q1),
-    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
-        test_priority_queue(Q2),
-
-    %% 2-element different-priority Q
-    Q3 = priority_queue:in(bar, 2, Q1),
-    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
-        test_priority_queue(Q3),
-
-    %% 1-element negative priority Q
-    Q4 = priority_queue:in(foo, -1, priority_queue:new()),
-    {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),
-
-    %% merge 2 * 1-element no-priority Qs
-    Q5 = priority_queue:join(priority_queue:in(foo, Q),
-                             priority_queue:in(bar, Q)),
-    {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
-        test_priority_queue(Q5),
-
-    %% merge 1-element no-priority Q with 1-element priority Q
-    Q6 = priority_queue:join(priority_queue:in(foo, Q),
-                             priority_queue:in(bar, 1, Q)),
-    {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
-        test_priority_queue(Q6),
-
-    %% merge 1-element priority Q with 1-element no-priority Q
-    Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
-                             priority_queue:in(bar, Q)),
-    {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
-        test_priority_queue(Q7),
-
-    %% merge 2 * 1-element same-priority Qs
-    Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
-                             priority_queue:in(bar, 1, Q)),
-    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
-        test_priority_queue(Q8),
-
-    %% merge 2 * 1-element different-priority Qs
-    Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
-                             priority_queue:in(bar, 2, Q)),
-    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
-        test_priority_queue(Q9),
-
-    %% merge 2 * 1-element different-priority Qs (other way around)
-    Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
-                              priority_queue:in(foo, 1, Q)),
-    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
-        test_priority_queue(Q10),
-
-    %% merge 2 * 2-element multi-different-priority Qs
-    Q11 = priority_queue:join(Q6, Q5),
-    {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
-     [bar, foo, foo, bar]} = test_priority_queue(Q11),
-
-    %% and the other way around
-    Q12 = priority_queue:join(Q5, Q6),
-    {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
-     [bar, foo, bar, foo]} = test_priority_queue(Q12),
-
-    %% merge with negative priorities
-    Q13 = priority_queue:join(Q4, Q5),
-    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
-        test_priority_queue(Q13),
-
-    %% and the other way around
-    Q14 = priority_queue:join(Q5, Q4),
-    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
-        test_priority_queue(Q14),
-
-    %% joins with empty queues:
-    Q1 = priority_queue:join(Q, Q1),
-    Q1 = priority_queue:join(Q1, Q),
-
-    %% insert with priority into non-empty zero-priority queue
-    Q15 = priority_queue:in(baz, 1, Q5),
-    {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
-        test_priority_queue(Q15),
-
-    %% 1-element infinity priority Q
-    Q16 = priority_queue:in(foo, infinity, Q),
-    {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
-
-    %% add infinity to 0-priority Q
-    Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
-    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
-        test_priority_queue(Q17),
-
-    %% and the other way around
-    Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
-    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
-        test_priority_queue(Q18),
-
-    %% add infinity to mixed-priority Q
-    Q19 = priority_queue:in(qux, infinity, Q3),
-    {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
-        test_priority_queue(Q19),
-
-    %% merge the above with a negative priority Q
-    Q20 = priority_queue:join(Q19, Q4),
-    {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
-     [qux, bar, foo, foo]} = test_priority_queue(Q20),
-
-    %% merge two infinity priority queues
-    Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
-                              priority_queue:in(bar, infinity, Q)),
-    {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
-        test_priority_queue(Q21),
-
-    %% merge two mixed priority with infinity queues
-    Q22 = priority_queue:join(Q18, Q20),
-    {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
-                      {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
-        test_priority_queue(Q22),
-
-    passed.
-
-priority_queue_in_all(Q, L) ->
-    lists:foldl(fun (X, Acc) -> priority_queue:in(X, Acc) end, Q, L).
-
-priority_queue_out_all(Q) ->
-    case priority_queue:out(Q) of
-        {empty, _}       -> [];
-        {{value, V}, Q1} -> [V | priority_queue_out_all(Q1)]
-    end.
-
-test_priority_queue(Q) ->
-    {priority_queue:is_queue(Q),
-     priority_queue:is_empty(Q),
-     priority_queue:len(Q),
-     priority_queue:to_list(Q),
-     priority_queue_out_all(Q)}.
-
-test_simple_n_element_queue(N) ->
-    Items = lists:seq(1, N),
-    Q = priority_queue_in_all(priority_queue:new(), Items),
-    ToListRes = [{0, X} || X <- Items],
-    {true, false, N, ToListRes, Items} = test_priority_queue(Q),
-    passed.
-
-test_pg_local() ->
-    [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- [x, x]],
-    check_pg_local(ok, [], []),
-    check_pg_local(pg_local:join(a, P), [P], []),
-    check_pg_local(pg_local:join(b, P), [P], [P]),
-    check_pg_local(pg_local:join(a, P), [P, P], [P]),
-    check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
-    check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
-    check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
-    check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
-    check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
-    check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
-    check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
-    [begin X ! done,
-           Ref = erlang:monitor(process, X),
-           receive {'DOWN', Ref, process, X, _Info} -> ok end
-     end  || X <- [P, Q]],
-    check_pg_local(ok, [], []),
-    passed.
-
-check_pg_local(ok, APids, BPids) ->
-    ok = pg_local:sync(),
-    [true, true] = [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) ||
-                       {Key, Pids} <- [{a, APids}, {b, BPids}]].
-
-test_unfold() ->
-    {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test),
-    List = lists:seq(2,20,2),
-    {List, 0} = rabbit_misc:unfold(fun (0) -> false;
-                                       (N) -> {true, N*2, N-1}
-                                   end, 10),
-    passed.
-
-test_table_codec() ->
-    %% FIXME this does not test inexact numbers (double and float) yet,
-    %% because they won't pass the equality assertions
-    Table = [{<<"longstr">>,   longstr,   <<"Here is a long string">>},
-             {<<"signedint">>, signedint, 12345},
-             {<<"decimal">>,   decimal,   {3, 123456}},
-             {<<"timestamp">>, timestamp, 109876543209876},
-             {<<"table">>,     table,     [{<<"one">>, signedint, 54321},
-                                           {<<"two">>, longstr,
-                                            <<"A long string">>}]},
-             {<<"byte">>,      byte,      -128},
-             {<<"long">>,      long,      1234567890},
-             {<<"short">>,     short,     655},
-             {<<"bool">>,      bool,      true},
-             {<<"binary">>,    binary,    <<"a binary string">>},
-             {<<"void">>,      void,      undefined},
-             {<<"array">>,     array,     [{signedint, 54321},
-                                           {longstr, <<"A long string">>}]}
-            ],
-    Binary = <<
-               7,"longstr",   "S", 21:32, "Here is a long string",
-               9,"signedint", "I", 12345:32/signed,
-               7,"decimal",   "D", 3, 123456:32,
-               9,"timestamp", "T", 109876543209876:64,
-               5,"table",     "F", 31:32, % length of table
-               3,"one",       "I", 54321:32,
-               3,"two",       "S", 13:32, "A long string",
-               4,"byte",      "b", -128:8/signed,
-               4,"long",      "l", 1234567890:64,
-               5,"short",     "s", 655:16,
-               4,"bool",      "t", 1,
-               6,"binary",    "x", 15:32, "a binary string",
-               4,"void",      "V",
-               5,"array",     "A", 23:32,
-               "I", 54321:32,
-               "S", 13:32, "A long string"
-             >>,
-    Binary = rabbit_binary_generator:generate_table(Table),
-    Table  = rabbit_binary_parser:parse_table(Binary),
-    passed.
-
-%% Test that content frames don't exceed frame-max
-test_content_framing(FrameMax, BodyBin) ->
-    [Header | Frames] =
-        rabbit_binary_generator:build_simple_content_frames(
-          1,
-          rabbit_binary_generator:ensure_content_encoded(
-            rabbit_basic:build_content(#'P_basic'{}, BodyBin),
-            rabbit_framing_amqp_0_9_1),
-          FrameMax,
-          rabbit_framing_amqp_0_9_1),
-    %% header is formatted correctly and the size is the total of the
-    %% fragments
-    <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
-      BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
-    BodySize = size(BodyBin),
-    true = lists:all(
-             fun (ContentFrame) ->
-                     FrameBinary = list_to_binary(ContentFrame),
-                     %% assert
-                     <<_TypeAndChannel:3/binary,
-                       Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
-                         FrameBinary,
-                     size(FrameBinary) =< FrameMax
-             end, Frames),
-    passed.
-
-test_content_framing() ->
-    %% no content
-    passed = test_content_framing(4096, <<>>),
-    %% easily fit in one frame
-    passed = test_content_framing(4096, <<"Easy">>),
-    %% exactly one frame (empty frame = 8 bytes)
-    passed = test_content_framing(11, <<"One">>),
-    %% more than one frame
-    passed = test_content_framing(11, <<"More than one frame">>),
-    passed.
-
-test_content_transcoding() ->
-    %% there are no guarantees provided by 'clear' - it's just a hint
-    ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
-    ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
-    EnsureDecoded =
-        fun (C0) ->
-                C1 = rabbit_binary_parser:ensure_content_decoded(C0),
-                true = C1#content.properties =/= none,
-                C1
-        end,
-    EnsureEncoded =
-        fun (Protocol) ->
-                fun (C0) ->
-                        C1 = rabbit_binary_generator:ensure_content_encoded(
-                               C0, Protocol),
-                        true = C1#content.properties_bin =/= none,
-                        C1
-                end
-        end,
-    %% Beyond the assertions in Ensure*, the only testable guarantee
-    %% is that the operations should never fail.
-    %%
-    %% If we were using quickcheck we'd simply stuff all the above
-    %% into a generator for sequences of operations. In the absence of
-    %% quickcheck we pick particularly interesting sequences that:
-    %%
-    %% - execute every op twice since they are idempotent
-    %% - invoke clear_decoded, clear_encoded, decode and transcode
-    %%   with one or both of decoded and encoded content present
-    [begin
-         sequence_with_content([Op]),
-         sequence_with_content([ClearEncoded, Op]),
-         sequence_with_content([ClearDecoded, Op])
-     end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
-                   EnsureEncoded(rabbit_framing_amqp_0_9_1),
-                   EnsureEncoded(rabbit_framing_amqp_0_8)]],
-    passed.
-
-sequence_with_content(Sequence) ->
-    lists:foldl(fun (F, V) -> F(F(V)) end,
-                rabbit_binary_generator:ensure_content_encoded(
-                  rabbit_basic:build_content(#'P_basic'{}, <<>>),
-                  rabbit_framing_amqp_0_9_1),
-                Sequence).
-
-test_topic_matching() ->
-    XName = #resource{virtual_host = <<"/">>,
-                      kind = exchange,
-                      name = <<"test_exchange">>},
-    X0 = #exchange{name = XName, type = topic, durable = false,
-                   auto_delete = false, arguments = []},
-    X = rabbit_exchange_decorator:set(X0),
-    %% create
-    rabbit_exchange_type_topic:validate(X),
-    exchange_op_callback(X, create, []),
-
-    %% add some bindings
-    Bindings = [#binding{source = XName,
-                         key = list_to_binary(Key),
-                         destination = #resource{virtual_host = <<"/">>,
-                                                 kind = queue,
-                                                 name = list_to_binary(Q)},
-                         args = Args} ||
-                   {Key, Q, Args} <- [{"a.b.c",         "t1",  []},
-                                      {"a.*.c",         "t2",  []},
-                                      {"a.#.b",         "t3",  []},
-                                      {"a.b.b.c",       "t4",  []},
-                                      {"#",             "t5",  []},
-                                      {"#.#",           "t6",  []},
-                                      {"#.b",           "t7",  []},
-                                      {"*.*",           "t8",  []},
-                                      {"a.*",           "t9",  []},
-                                      {"*.b.c",         "t10", []},
-                                      {"a.#",           "t11", []},
-                                      {"a.#.#",         "t12", []},
-                                      {"b.b.c",         "t13", []},
-                                      {"a.b.b",         "t14", []},
-                                      {"a.b",           "t15", []},
-                                      {"b.c",           "t16", []},
-                                      {"",              "t17", []},
-                                      {"*.*.*",         "t18", []},
-                                      {"vodka.martini", "t19", []},
-                                      {"a.b.c",         "t20", []},
-                                      {"*.#",           "t21", []},
-                                      {"#.*.#",         "t22", []},
-                                      {"*.#.#",         "t23", []},
-                                      {"#.#.#",         "t24", []},
-                                      {"*",             "t25", []},
-                                      {"#.b.#",         "t26", []},
-                                      {"args-test",     "t27",
-                                       [{<<"foo">>, longstr, <<"bar">>}]},
-                                      {"args-test",     "t27", %% Note aliasing
-                                       [{<<"foo">>, longstr, <<"baz">>}]}]],
-    lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
-                  Bindings),
-
-    %% test some matches
-    test_topic_expect_match(
-      X, [{"a.b.c",               ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
-                                   "t18", "t20", "t21", "t22", "t23", "t24",
-                                   "t26"]},
-          {"a.b",                 ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
-                                   "t12", "t15", "t21", "t22", "t23", "t24",
-                                   "t26"]},
-          {"a.b.b",               ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
-                                   "t18", "t21", "t22", "t23", "t24", "t26"]},
-          {"",                    ["t5", "t6", "t17", "t24"]},
-          {"b.c.c",               ["t5", "t6", "t18", "t21", "t22", "t23",
-                                   "t24", "t26"]},
-          {"a.a.a.a.a",           ["t5", "t6", "t11", "t12", "t21", "t22",
-                                   "t23", "t24"]},
-          {"vodka.gin",           ["t5", "t6", "t8", "t21", "t22", "t23",
-                                   "t24"]},
-          {"vodka.martini",       ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
-                                   "t24"]},
-          {"b.b.c",               ["t5", "t6", "t10", "t13", "t18", "t21",
-                                   "t22", "t23", "t24", "t26"]},
-          {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
-          {"oneword",             ["t5", "t6", "t21", "t22", "t23", "t24",
-                                   "t25"]},
-          {"args-test",           ["t5", "t6", "t21", "t22", "t23", "t24",
-                                   "t25", "t27"]}]),
-    %% remove some bindings
-    RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
-                       lists:nth(11, Bindings), lists:nth(19, Bindings),
-                       lists:nth(21, Bindings), lists:nth(28, Bindings)],
-    exchange_op_callback(X, remove_bindings, [RemovedBindings]),
-    RemainingBindings = ordsets:to_list(
-                          ordsets:subtract(ordsets:from_list(Bindings),
-                                           ordsets:from_list(RemovedBindings))),
-
-    %% test some matches
-    test_topic_expect_match(
-      X,
-      [{"a.b.c",               ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
-                                "t23", "t24", "t26"]},
-       {"a.b",                 ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
-                                "t22", "t23", "t24", "t26"]},
-       {"a.b.b",               ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
-                                "t23", "t24", "t26"]},
-       {"",                    ["t6", "t17", "t24"]},
-       {"b.c.c",               ["t6", "t18", "t22", "t23", "t24", "t26"]},
-       {"a.a.a.a.a",           ["t6", "t12", "t22", "t23", "t24"]},
-       {"vodka.gin",           ["t6", "t8", "t22", "t23", "t24"]},
-       {"vodka.martini",       ["t6", "t8", "t22", "t23", "t24"]},
-       {"b.b.c",               ["t6", "t10", "t13", "t18", "t22", "t23",
-                                "t24", "t26"]},
-       {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
-       {"oneword",             ["t6", "t22", "t23", "t24", "t25"]},
-       {"args-test",           ["t6", "t22", "t23", "t24", "t25", "t27"]}]),
-
-    %% remove the entire exchange
-    exchange_op_callback(X, delete, [RemainingBindings]),
-    %% none should match now
-    test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
-    passed.
-
-exchange_op_callback(X, Fun, Args) ->
-    rabbit_misc:execute_mnesia_transaction(
-      fun () -> rabbit_exchange:callback(X, Fun, transaction, [X] ++ Args) end),
-    rabbit_exchange:callback(X, Fun, none, [X] ++ Args).
-
-test_topic_expect_match(X, List) ->
-    lists:foreach(
-      fun ({Key, Expected}) ->
-              BinKey = list_to_binary(Key),
-              Message = rabbit_basic:message(X#exchange.name, BinKey,
-                                             #'P_basic'{}, <<>>),
-              Res = rabbit_exchange_type_topic:route(
-                      X, #delivery{mandatory = false,
-                                   sender    = self(),
-                                   message   = Message}),
-              ExpectedRes = lists:map(
-                              fun (Q) -> #resource{virtual_host = <<"/">>,
-                                                   kind = queue,
-                                                   name = list_to_binary(Q)}
-                              end, Expected),
-              true = (lists:usort(ExpectedRes) =:= lists:usort(Res))
-      end, List).
-
-test_app_management() ->
-    control_action(wait, [rabbit_mnesia:dir() ++ ".pid"]),
-    %% Starting, stopping and diagnostics.  Note that we don't try
-    %% 'report' when the rabbit app is stopped and that we enable
-    %% tracing for the duration of this function.
-    ok = control_action(trace_on, []),
-    ok = control_action(stop_app, []),
-    ok = control_action(stop_app, []),
-    ok = control_action(status, []),
-    ok = control_action(cluster_status, []),
-    ok = control_action(environment, []),
-    ok = control_action(start_app, []),
-    ok = control_action(start_app, []),
-    ok = control_action(status, []),
-    ok = control_action(report, []),
-    ok = control_action(cluster_status, []),
-    ok = control_action(environment, []),
-    ok = control_action(trace_off, []),
-    passed.
-
-test_log_management() ->
-    MainLog = rabbit:log_location(kernel),
-    SaslLog = rabbit:log_location(sasl),
-    Suffix = ".1",
-
-    %% prepare basic logs
-    file:delete([MainLog, Suffix]),
-    file:delete([SaslLog, Suffix]),
-
-    %% simple logs reopening
-    ok = control_action(rotate_logs, []),
-    [true, true] = empty_files([MainLog, SaslLog]),
-    ok = test_logs_working(MainLog, SaslLog),
-
-    %% simple log rotation
-    ok = control_action(rotate_logs, [Suffix]),
-    [true, true] = non_empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
-    [true, true] = empty_files([MainLog, SaslLog]),
-    ok = test_logs_working(MainLog, SaslLog),
-
-    %% reopening logs with log rotation performed first
-    ok = clean_logs([MainLog, SaslLog], Suffix),
-    ok = control_action(rotate_logs, []),
-    ok = file:rename(MainLog, [MainLog, Suffix]),
-    ok = file:rename(SaslLog, [SaslLog, Suffix]),
-    ok = test_logs_working([MainLog, Suffix], [SaslLog, Suffix]),
-    ok = control_action(rotate_logs, []),
-    ok = test_logs_working(MainLog, SaslLog),
-
-    %% log rotation on empty files (the main log will have a ctl action logged)
-    ok = clean_logs([MainLog, SaslLog], Suffix),
-    ok = control_action(rotate_logs, []),
-    ok = control_action(rotate_logs, [Suffix]),
-    [false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
-
-    %% logs with suffix are not writable
-    ok = control_action(rotate_logs, [Suffix]),
-    ok = make_files_non_writable([[MainLog, Suffix], [SaslLog, Suffix]]),
-    ok = control_action(rotate_logs, [Suffix]),
-    ok = test_logs_working(MainLog, SaslLog),
-
-    %% rotate when original log files are not writable
-    ok = make_files_non_writable([MainLog, SaslLog]),
-    ok = control_action(rotate_logs, []),
-
-    %% logging directed to tty (first, remove handlers)
-    ok = delete_log_handlers([rabbit_sasl_report_file_h,
-                              rabbit_error_logger_file_h]),
-    ok = clean_logs([MainLog, SaslLog], Suffix),
-    ok = application:set_env(rabbit, sasl_error_logger, tty),
-    ok = application:set_env(rabbit, error_logger, tty),
-    ok = control_action(rotate_logs, []),
-    [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
-
-    %% rotate logs when logging is turned off
-    ok = application:set_env(rabbit, sasl_error_logger, false),
-    ok = application:set_env(rabbit, error_logger, silent),
-    ok = control_action(rotate_logs, []),
-    [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
-
-    %% cleanup
-    ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
-    ok = application:set_env(rabbit, error_logger, {file, MainLog}),
-    ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog},
-                           {rabbit_sasl_report_file_h, SaslLog}]),
-    passed.
-
-test_log_management_during_startup() ->
-    MainLog = rabbit:log_location(kernel),
-    SaslLog = rabbit:log_location(sasl),
-
-    %% start application with simple tty logging
-    ok = control_action(stop_app, []),
-    ok = application:set_env(rabbit, error_logger, tty),
-    ok = application:set_env(rabbit, sasl_error_logger, tty),
-    ok = add_log_handlers([{error_logger_tty_h, []},
-                           {sasl_report_tty_h, []}]),
-    ok = control_action(start_app, []),
-
-    %% start application with tty logging and
-    %% proper handlers not installed
-    ok = control_action(stop_app, []),
-    ok = error_logger:tty(false),
-    ok = delete_log_handlers([sasl_report_tty_h]),
-    ok = case catch control_action(start_app, []) of
-             ok -> exit({got_success_but_expected_failure,
-                         log_rotation_tty_no_handlers_test});
-             {badrpc, {'EXIT', {rabbit,failure_during_boot,
-               {error,{cannot_log_to_tty,
-                       _, not_installed}}}}} -> ok
-         end,
-
-    %% fix sasl logging
-    ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
-
-    %% start application with logging to non-existing directory
-    TmpLog = "/tmp/rabbit-tests/test.log",
-    delete_file(TmpLog),
-    ok = control_action(stop_app, []),
-    ok = application:set_env(rabbit, error_logger, {file, TmpLog}),
-
-    ok = delete_log_handlers([rabbit_error_logger_file_h]),
-    ok = add_log_handlers([{error_logger_file_h, MainLog}]),
-    ok = control_action(start_app, []),
-
-    %% start application with logging to directory with no
-    %% write permissions
-    ok = control_action(stop_app, []),
-    TmpDir = "/tmp/rabbit-tests",
-    ok = set_permissions(TmpDir, 8#00400),
-    ok = delete_log_handlers([rabbit_error_logger_file_h]),
-    ok = add_log_handlers([{error_logger_file_h, MainLog}]),
-    ok = case control_action(start_app, []) of
-             ok -> exit({got_success_but_expected_failure,
-                         log_rotation_no_write_permission_dir_test});
-             {badrpc, {'EXIT',
-               {rabbit, failure_during_boot,
-                {error, {cannot_log_to_file, _, _}}}}} -> ok
-         end,
-
-    %% start application with logging to a subdirectory which
-    %% parent directory has no write permissions
-    ok = control_action(stop_app, []),
-    TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log",
-    ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}),
-    ok = add_log_handlers([{error_logger_file_h, MainLog}]),
-    ok = case control_action(start_app, []) of
-             ok -> exit({got_success_but_expected_failure,
-                         log_rotatation_parent_dirs_test});
-             {badrpc,
-              {'EXIT', {rabbit,failure_during_boot,
-                {error, {cannot_log_to_file, _,
-                  {error,
-                   {cannot_create_parent_dirs, _, eacces}}}}}}} -> ok
-         end,
-    ok = set_permissions(TmpDir, 8#00700),
-    ok = set_permissions(TmpLog, 8#00600),
-    ok = delete_file(TmpLog),
-    ok = file:del_dir(TmpDir),
-
-    %% start application with standard error_logger_file_h
-    %% handler not installed
-    ok = control_action(stop_app, []),
-    ok = application:set_env(rabbit, error_logger, {file, MainLog}),
-    ok = control_action(start_app, []),
-
-    %% start application with standard sasl handler not installed
-    %% and rabbit main log handler installed correctly
-    ok = control_action(stop_app, []),
-    ok = delete_log_handlers([rabbit_sasl_report_file_h]),
-    ok = control_action(start_app, []),
-    passed.
-
-test_arguments_parser() ->
-    GlobalOpts1 = [{"-f1", flag}, {"-o1", {option, "foo"}}],
-    Commands1 = [command1, {command2, [{"-f2", flag}, {"-o2", {option, "bar"}}]}],
-
-    GetOptions =
-        fun (Args) ->
-                rabbit_misc:parse_arguments(Commands1, GlobalOpts1, Args)
-        end,
-
-    check_parse_arguments(no_command, GetOptions, []),
-    check_parse_arguments(no_command, GetOptions, ["foo", "bar"]),
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", false}, {"-o1", "foo"}], []}},
-      GetOptions, ["command1"]),
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
-      GetOptions, ["command1", "-o1", "blah"]),
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", true}, {"-o1", "foo"}], []}},
-      GetOptions, ["command1", "-f1"]),
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
-      GetOptions, ["-o1", "blah", "command1"]),
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], ["quux"]}},
-      GetOptions, ["-o1", "blah", "command1", "quux"]),
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", true}, {"-o1", "blah"}], ["quux", "baz"]}},
-      GetOptions, ["command1", "quux", "-f1", "-o1", "blah", "baz"]),
-    %% For duplicate flags, the last one counts
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", false}, {"-o1", "second"}], []}},
-      GetOptions, ["-o1", "first", "command1", "-o1", "second"]),
-    %% If the flag "eats" the command, the command won't be recognised
-    check_parse_arguments(no_command, GetOptions,
-                      ["-o1", "command1", "quux"]),
-    %% If a flag eats another flag, the eaten flag won't be recognised
-    check_parse_arguments(
-      {ok, {command1, [{"-f1", false}, {"-o1", "-f1"}], []}},
-      GetOptions, ["command1", "-o1", "-f1"]),
-
-    %% Now for some command-specific flags...
-    check_parse_arguments(
-      {ok, {command2, [{"-f1", false}, {"-f2", false},
-                       {"-o1", "foo"}, {"-o2", "bar"}], []}},
-      GetOptions, ["command2"]),
-
-    check_parse_arguments(
-      {ok, {command2, [{"-f1", false}, {"-f2", true},
-                       {"-o1", "baz"}, {"-o2", "bar"}], ["quux", "foo"]}},
-      GetOptions, ["-f2", "command2", "quux", "-o1", "baz", "foo"]),
-
-    passed.
-
-test_dynamic_mirroring() ->
-    %% Just unit tests of the node selection logic, see multi node
-    %% tests for the rest...
-    Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
-                {MNode, SNodes, SSNodes}, All) ->
-                   {ok, M} = rabbit_mirror_queue_misc:module(Policy),
-                   {NewM, NewSs0} = M:suggested_queue_nodes(
-                                      Params, MNode, SNodes, SSNodes, All),
-                   NewSs1 = lists:sort(NewSs0),
-                   case dm_list_match(NewSs, NewSs1, ExtraSs) of
-                       ok    -> ok;
-                       error -> exit({no_match, NewSs, NewSs1, ExtraSs})
-                   end
-           end,
-
-    Test({a,[b,c],0},<<"all">>,'_',{a,[],   []},   [a,b,c]),
-    Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
-    Test({a,[b,c],0},<<"all">>,'_',{a,[d],  [d]},  [a,b,c]),
-
-    N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,
-
-    %% Add a node
-    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
-    Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
-    %% Add two nodes and drop one
-    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
-    %% Don't try to include nodes that are not running
-    Test({a,[b],  0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
-    %% If we can't find any of the nodes listed then just keep the master
-    Test({a,[],   0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
-    %% And once that's happened, still keep the master even when not listed,
-    %% if nothing is synced
-    Test({a,[b,c],0},<<"nodes">>,N([b,c]),  {a,[], []}, [a,b,c,d]),
-    Test({a,[b,c],0},<<"nodes">>,N([b,c]),  {a,[b],[]}, [a,b,c,d]),
-    %% But if something is synced we can lose the master - but make
-    %% sure we pick the new master from the nodes which are synced!
-    Test({b,[c],  0},<<"nodes">>,N([b,c]),  {a,[b],[b]},[a,b,c,d]),
-    Test({b,[c],  0},<<"nodes">>,N([c,b]),  {a,[b],[b]},[a,b,c,d]),
-
-    Test({a,[],   1},<<"exactly">>,2,{a,[],   []},   [a,b,c,d]),
-    Test({a,[],   2},<<"exactly">>,3,{a,[],   []},   [a,b,c,d]),
-    Test({a,[c],  0},<<"exactly">>,2,{a,[c],  [c]},  [a,b,c,d]),
-    Test({a,[c],  1},<<"exactly">>,3,{a,[c],  [c]},  [a,b,c,d]),
-    Test({a,[c],  0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
-    Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),
-
-    passed.
-
-%% Does the first list match the second where the second is required
-%% to have exactly Extra superfluous items?
-dm_list_match([],     [],      0)     -> ok;
-dm_list_match(_,      [],     _Extra) -> error;
-dm_list_match([H|T1], [H |T2], Extra) -> dm_list_match(T1, T2, Extra);
-dm_list_match(L1,     [_H|T2], Extra) -> dm_list_match(L1, T2, Extra - 1).
-
-test_user_management() ->
-
-    %% lots of stuff that should fail
-    {error, {no_such_user, _}} =
-        control_action(delete_user, ["foo"]),
-    {error, {no_such_user, _}} =
-        control_action(change_password, ["foo", "baz"]),
-    {error, {no_such_vhost, _}} =
-        control_action(delete_vhost, ["/testhost"]),
-    {error, {no_such_user, _}} =
-        control_action(set_permissions, ["foo", ".*", ".*", ".*"]),
-    {error, {no_such_user, _}} =
-        control_action(clear_permissions, ["foo"]),
-    {error, {no_such_user, _}} =
-        control_action(list_user_permissions, ["foo"]),
-    {error, {no_such_vhost, _}} =
-        control_action(list_permissions, [], [{"-p", "/testhost"}]),
-    {error, {invalid_regexp, _, _}} =
-        control_action(set_permissions, ["guest", "+foo", ".*", ".*"]),
-    {error, {no_such_user, _}} =
-        control_action(set_user_tags, ["foo", "bar"]),
-
-    %% user creation
-    ok = control_action(add_user, ["foo", "bar"]),
-    {error, {user_already_exists, _}} =
-        control_action(add_user, ["foo", "bar"]),
-    ok = control_action(clear_password, ["foo"]),
-    ok = control_action(change_password, ["foo", "baz"]),
-
-    TestTags = fun (Tags) ->
-                       Args = ["foo" | [atom_to_list(T) || T <- Tags]],
-                       ok = control_action(set_user_tags, Args),
-                       {ok, #internal_user{tags = Tags}} =
-                           rabbit_auth_backend_internal:lookup_user(<<"foo">>),
-                       ok = control_action(list_users, [])
-               end,
-    TestTags([foo, bar, baz]),
-    TestTags([administrator]),
-    TestTags([]),
-
-    %% vhost creation
-    ok = control_action(add_vhost, ["/testhost"]),
-    {error, {vhost_already_exists, _}} =
-        control_action(add_vhost, ["/testhost"]),
-    ok = control_action(list_vhosts, []),
-
-    %% user/vhost mapping
-    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
-                        [{"-p", "/testhost"}]),
-    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
-                        [{"-p", "/testhost"}]),
-    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
-                        [{"-p", "/testhost"}]),
-    ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
-    ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
-    ok = control_action(list_user_permissions, ["foo"]),
-
-    %% user/vhost unmapping
-    ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),
-    ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),
-
-    %% vhost deletion
-    ok = control_action(delete_vhost, ["/testhost"]),
-    {error, {no_such_vhost, _}} =
-        control_action(delete_vhost, ["/testhost"]),
-
-    %% deleting a populated vhost
-    ok = control_action(add_vhost, ["/testhost"]),
-    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
-                        [{"-p", "/testhost"}]),
-    {new, _} = rabbit_amqqueue:declare(
-                 rabbit_misc:r(<<"/testhost">>, queue, <<"test">>),
-                 true, false, [], none),
-    ok = control_action(delete_vhost, ["/testhost"]),
-
-    %% user deletion
-    ok = control_action(delete_user, ["foo"]),
-    {error, {no_such_user, _}} =
-        control_action(delete_user, ["foo"]),
-
-    passed.
-
-test_runtime_parameters() ->
-    rabbit_runtime_parameters_test:register(),
-    Good = fun(L) -> ok                = control_action(set_parameter, L) end,
-    Bad  = fun(L) -> {error_string, _} = control_action(set_parameter, L) end,
-
-    %% Acceptable for bijection
-    Good(["test", "good", "\"ignore\""]),
-    Good(["test", "good", "123"]),
-    Good(["test", "good", "true"]),
-    Good(["test", "good", "false"]),
-    Good(["test", "good", "null"]),
-    Good(["test", "good", "{\"key\": \"value\"}"]),
-
-    %% Invalid json
-    Bad(["test", "good", "atom"]),
-    Bad(["test", "good", "{\"foo\": \"bar\""]),
-    Bad(["test", "good", "{foo: \"bar\"}"]),
-
-    %% Test actual validation hook
-    Good(["test", "maybe", "\"good\""]),
-    Bad(["test", "maybe", "\"bad\""]),
-    Good(["test", "admin", "\"ignore\""]), %% ctl means 'user' -> none
-
-    ok = control_action(list_parameters, []),
-
-    ok = control_action(clear_parameter, ["test", "good"]),
-    ok = control_action(clear_parameter, ["test", "maybe"]),
-    ok = control_action(clear_parameter, ["test", "admin"]),
-    {error_string, _} =
-        control_action(clear_parameter, ["test", "neverexisted"]),
-
-    %% We can delete for a component that no longer exists
-    Good(["test", "good", "\"ignore\""]),
-    rabbit_runtime_parameters_test:unregister(),
-    ok = control_action(clear_parameter, ["test", "good"]),
-    passed.
-
-test_policy_validation() ->
-    rabbit_runtime_parameters_test:register_policy_validator(),
-    SetPol = fun (Key, Val) ->
-                     control_action_opts(
-                       ["set_policy", "name", ".*",
-                        rabbit_misc:format("{\"~s\":~p}", [Key, Val])])
-             end,
-
-    ok    = SetPol("testeven", []),
-    ok    = SetPol("testeven", [1, 2]),
-    ok    = SetPol("testeven", [1, 2, 3, 4]),
-    ok    = SetPol("testpos",  [2, 5, 5678]),
-
-    error = SetPol("testpos",  [-1, 0, 1]),
-    error = SetPol("testeven", [ 1, 2, 3]),
-
-    ok = control_action(clear_policy, ["name"]),
-    rabbit_runtime_parameters_test:unregister_policy_validator(),
-    passed.
-
-test_policy_opts_validation() ->
-    Set  = fun (Extra) -> control_action_opts(
-                            ["set_policy", "name", ".*", "{\"ha-mode\":\"all\"}"
-                             | Extra]) end,
-    OK   = fun (Extra) -> ok = Set(Extra) end,
-    Fail = fun (Extra) -> error = Set(Extra) end,
-
-    OK  ([]),
-
-    OK  (["--priority", "0"]),
-    OK  (["--priority", "3"]),
-    Fail(["--priority", "banana"]),
-    Fail(["--priority"]),
-
-    OK  (["--apply-to", "all"]),
-    OK  (["--apply-to", "queues"]),
-    Fail(["--apply-to", "bananas"]),
-    Fail(["--apply-to"]),
-
-    OK  (["--priority", "3",      "--apply-to", "queues"]),
-    Fail(["--priority", "banana", "--apply-to", "queues"]),
-    Fail(["--priority", "3",      "--apply-to", "bananas"]),
-
-    Fail(["--offline"]),
-
-    ok = control_action(clear_policy, ["name"]),
-    passed.
-
-test_ha_policy_validation() ->
-    Set  = fun (JSON) -> control_action_opts(
-                           ["set_policy", "name", ".*", JSON]) end,
-    OK   = fun (JSON) -> ok = Set(JSON) end,
-    Fail = fun (JSON) -> error = Set(JSON) end,
-
-    OK  ("{\"ha-mode\":\"all\"}"),
-    Fail("{\"ha-mode\":\"made_up\"}"),
-
-    Fail("{\"ha-mode\":\"nodes\"}"),
-    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":2}"),
-    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}"),
-    OK  ("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}"),
-    Fail("{\"ha-params\":[\"a\",\"b\"]}"),
-
-    Fail("{\"ha-mode\":\"exactly\"}"),
-    Fail("{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}"),
-    OK  ("{\"ha-mode\":\"exactly\",\"ha-params\":2}"),
-    Fail("{\"ha-params\":2}"),
-
-    OK  ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}"),
-    OK  ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}"),
-    Fail("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}"),
-    Fail("{\"ha-sync-mode\":\"manual\"}"),
-    Fail("{\"ha-sync-mode\":\"automatic\"}"),
-
-    ok = control_action(clear_policy, ["name"]),
-    passed.
-
-test_server_status() ->
-    %% create a few things so there is some useful information to list
-    {_Writer, Limiter, Ch} = test_channel(),
-    [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>],
-                        {new, Queue = #amqqueue{}} <-
-                            [rabbit_amqqueue:declare(
-                               rabbit_misc:r(<<"/">>, queue, Name),
-                               false, false, [], none)]],
-    ok = rabbit_amqqueue:basic_consume(
-           Q, true, Ch, Limiter, false, 0, <<"ctag">>, true, [], undefined),
-
-    %% list queues
-    ok = info_action(list_queues, rabbit_amqqueue:info_keys(), true),
-
-    %% list exchanges
-    ok = info_action(list_exchanges, rabbit_exchange:info_keys(), true),
-
-    %% list bindings
-    ok = info_action(list_bindings, rabbit_binding:info_keys(), true),
-    %% misc binding listing APIs
-    [_|_] = rabbit_binding:list_for_source(
-              rabbit_misc:r(<<"/">>, exchange, <<"">>)),
-    [_] = rabbit_binding:list_for_destination(
-            rabbit_misc:r(<<"/">>, queue, <<"foo">>)),
-    [_] = rabbit_binding:list_for_source_and_destination(
-            rabbit_misc:r(<<"/">>, exchange, <<"">>),
-            rabbit_misc:r(<<"/">>, queue, <<"foo">>)),
-
-    %% list connections
-    {H, P} = find_listener(),
-    {ok, C} = gen_tcp:connect(H, P, []),
-    gen_tcp:send(C, <<"AMQP", 0, 0, 9, 1>>),
-    timer:sleep(100),
-    ok = info_action(list_connections,
-                     rabbit_networking:connection_info_keys(), false),
-    %% close_connection
-    [ConnPid] = rabbit_networking:connections(),
-    ok = control_action(close_connection, [rabbit_misc:pid_to_string(ConnPid),
-                                           "go away"]),
-
-    %% list channels
-    ok = info_action(list_channels, rabbit_channel:info_keys(), false),
-
-    %% list consumers
-    ok = control_action(list_consumers, []),
-
-    %% set vm memory high watermark
-    HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
-    ok = control_action(set_vm_memory_high_watermark, ["1"]),
-    ok = control_action(set_vm_memory_high_watermark, ["1.0"]),
-    %% this will trigger an alarm
-    ok = control_action(set_vm_memory_high_watermark, ["0.0"]),
-    %% reset
-    ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]),
-
-    %% eval
-    {error_string, _} = control_action(eval, ["\""]),
-    {error_string, _} = control_action(eval, ["a("]),
-    ok = control_action(eval, ["a."]),
-
-    %% cleanup
-    [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],
-
-    unlink(Ch),
-    ok = rabbit_channel:shutdown(Ch),
-
-    passed.
-
-test_amqp_connection_refusal() ->
-    [passed = test_amqp_connection_refusal(V) ||
-        V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]],
-    passed.
-
-test_amqp_connection_refusal(Header) ->
-    {H, P} = find_listener(),
-    {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]),
-    ok = gen_tcp:send(C, Header),
-    {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100),
-    ok = gen_tcp:close(C),
-    passed.
-
-find_listener() ->
-    [#listener{host = H, port = P} | _] =
-        [L || L = #listener{node = N, protocol = amqp}
-                  <- rabbit_networking:active_listeners(),
-              N =:= node()],
-    {H, P}.
-
-test_writer(Pid) ->
-    receive
-        {'$gen_call', From, flush} -> gen_server:reply(From, ok),
-                                      test_writer(Pid);
-        {send_command, Method}     -> Pid ! Method,
-                                      test_writer(Pid);
-        shutdown                   -> ok
-    end.
-
-test_channel() ->
-    Me = self(),
-    Writer = spawn(fun () -> test_writer(Me) end),
-    {ok, Limiter} = rabbit_limiter:start_link(no_id),
-    {ok, Ch} = rabbit_channel:start_link(
-                 1, Me, Writer, Me, "", rabbit_framing_amqp_0_9_1,
-                 user(<<"guest">>), <<"/">>, [], Me, Limiter),
-    {Writer, Limiter, Ch}.
-
-test_spawn() ->
-    {Writer, _Limiter, Ch} = test_channel(),
-    ok = rabbit_channel:do(Ch, #'channel.open'{}),
-    receive #'channel.open_ok'{} -> ok
-    after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
-    end,
-    {Writer, Ch}.
-
-test_spawn(Node) ->
-    rpc:call(Node, ?MODULE, test_spawn_remote, []).
-
-%% Spawn an arbitrarily long-lived process, so we don't end up linking
-%% the channel to the short-lived process (RPC, here) spun up by the
-%% RPC server.
-test_spawn_remote() ->
-    RPC = self(),
-    spawn(fun () ->
-                  {Writer, Ch} = test_spawn(),
-                  RPC ! {Writer, Ch},
-                  link(Ch),
-                  receive
-                      _ -> ok
-                  end
-          end),
-    receive Res -> Res
-    after ?TIMEOUT  -> throw(failed_to_receive_result)
-    end.
-
-user(Username) ->
-    #user{username     = Username,
-          tags         = [administrator],
-          auth_backend = rabbit_auth_backend_internal,
-          impl         = #internal_user{username = Username,
-                                        tags     = [administrator]}}.
-
-test_confirms() ->
-    {_Writer, Ch} = test_spawn(),
-    DeclareBindDurableQueue =
-        fun() ->
-                rabbit_channel:do(Ch, #'queue.declare'{durable = true}),
-                receive #'queue.declare_ok'{queue = Q0} ->
-                        rabbit_channel:do(Ch, #'queue.bind'{
-                                            queue = Q0,
-                                            exchange = <<"amq.direct">>,
-                                            routing_key = "magic" }),
-                        receive #'queue.bind_ok'{} -> Q0
-                        after ?TIMEOUT -> throw(failed_to_bind_queue)
-                        end
-                after ?TIMEOUT -> throw(failed_to_declare_queue)
-                end
-        end,
-    %% Declare and bind two queues
-    QName1 = DeclareBindDurableQueue(),
-    QName2 = DeclareBindDurableQueue(),
-    %% Get the first one's pid (we'll crash it later)
-    {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
-    QPid1 = Q1#amqqueue.pid,
-    %% Enable confirms
-    rabbit_channel:do(Ch, #'confirm.select'{}),
-    receive
-        #'confirm.select_ok'{} -> ok
-    after ?TIMEOUT -> throw(failed_to_enable_confirms)
-    end,
-    %% Publish a message
-    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
-                                           routing_key = "magic"
-                                          },
-                      rabbit_basic:build_content(
-                        #'P_basic'{delivery_mode = 2}, <<"">>)),
-    %% We must not kill the queue before the channel has processed the
-    %% 'publish'.
-    ok = rabbit_channel:flush(Ch),
-    %% Crash the queue
-    QPid1 ! boom,
-    %% Wait for a nack
-    receive
-        #'basic.nack'{} -> ok;
-        #'basic.ack'{}  -> throw(received_ack_instead_of_nack)
-    after ?TIMEOUT -> throw(did_not_receive_nack)
-    end,
-    receive
-        #'basic.ack'{} -> throw(received_ack_when_none_expected)
-    after 1000 -> ok
-    end,
-    %% Cleanup
-    rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}),
-    receive
-        #'queue.delete_ok'{} -> ok
-    after ?TIMEOUT -> throw(failed_to_cleanup_queue)
-    end,
-    unlink(Ch),
-    ok = rabbit_channel:shutdown(Ch),
-
-    passed.
-
-test_with_state() ->
-    fhc_state = gen_server2:with_state(file_handle_cache,
-                                       fun (S) -> element(1, S) end),
-    passed.
-
-test_mcall() ->
-    P1 = spawn(fun gs2_test_listener/0),
-    register(foo, P1),
-    global:register_name(gfoo, P1),
-
-    P2 = spawn(fun() -> exit(bang) end),
-    %% ensure P2 is dead (ignore the race setting up the monitor)
-    await_exit(P2),
-
-    P3 = spawn(fun gs2_test_crasher/0),
-
-    %% since P2 crashes almost immediately and P3 after receiving its first
-    %% message, we have to spawn a few more processes to handle the additional
-    %% cases we're interested in here
-    register(baz, spawn(fun gs2_test_crasher/0)),
-    register(bog, spawn(fun gs2_test_crasher/0)),
-    global:register_name(gbaz, spawn(fun gs2_test_crasher/0)),
-
-    NoNode = rabbit_nodes:make("nonode"),
-
-    Targets =
-        %% pids
-        [P1, P2, P3]
-        ++
-        %% registered names
-        [foo, bar, baz]
-        ++
-        %% {Name, Node} pairs
-        [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}]
-        ++
-        %% {global, Name}
-        [{global, gfoo}, {global, gbar}, {global, gbaz}],
-
-    GoodResults = [{D, goodbye} || D <- [P1, foo,
-                                         {foo, node()},
-                                         {global, gfoo}]],
-
-    BadResults  = [{P2,             noproc},   % died before use
-                   {P3,             boom},     % died on first use
-                   {bar,            noproc},   % never registered
-                   {baz,            boom},     % died on first use
-                   {{bar, node()},  noproc},   % never registered
-                   {{bog, node()},  boom},     % died on first use
-                   {{foo, NoNode},  nodedown}, % invalid node
-                   {{global, gbar}, noproc},   % never registered globally
-                   {{global, gbaz}, boom}],    % died on first use
-
-    {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]),
-    true = lists:sort(Replies) == lists:sort(GoodResults),
-    true = lists:sort(Errors)  == lists:sort(BadResults),
-
-    %% cleanup (ignore the race setting up the monitor)
-    P1 ! stop,
-    await_exit(P1),
-    passed.
-
-await_exit(Pid) ->
-    MRef = erlang:monitor(process, Pid),
-    receive
-        {'DOWN', MRef, _, _, _} -> ok
-    end.
-
-gs2_test_crasher() ->
-    receive
-        {'$gen_call', _From, hello} -> exit(boom)
-    end.
-
-gs2_test_listener() ->
-    receive
-        {'$gen_call', From, hello} ->
-            gen_server2:reply(From, goodbye),
-            gs2_test_listener();
-        stop ->
-            ok
-    end.
-
-test_statistics_event_receiver(Pid) ->
-    receive
-        Foo -> Pid ! Foo, test_statistics_event_receiver(Pid)
-    end.
-
-test_statistics_receive_event(Ch, Matcher) ->
-    rabbit_channel:flush(Ch),
-    Ch ! emit_stats,
-    test_statistics_receive_event1(Ch, Matcher).
-
-test_statistics_receive_event1(Ch, Matcher) ->
-    receive #event{type = channel_stats, props = Props} ->
-            case Matcher(Props) of
-                true -> Props;
-                _    -> test_statistics_receive_event1(Ch, Matcher)
-            end
-    after ?TIMEOUT -> throw(failed_to_receive_event)
-    end.
-
-test_statistics() ->
-    application:set_env(rabbit, collect_statistics, fine),
-
-    %% ATM this just tests the queue / exchange stats in channels. That's
-    %% by far the most complex code though.
-
-    %% Set up a channel and queue
-    {_Writer, Ch} = test_spawn(),
-    rabbit_channel:do(Ch, #'queue.declare'{}),
-    QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
-            after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
-            end,
-    QRes = rabbit_misc:r(<<"/">>, queue, QName),
-    X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
-
-    rabbit_tests_event_receiver:start(self(), [node()], [channel_stats]),
-
-    %% Check stats empty
-    Event = test_statistics_receive_event(Ch, fun (_) -> true end),
-    [] = proplists:get_value(channel_queue_stats, Event),
-    [] = proplists:get_value(channel_exchange_stats, Event),
-    [] = proplists:get_value(channel_queue_exchange_stats, Event),
-
-    %% Publish and get a message
-    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
-                                           routing_key = QName},
-                      rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
-    rabbit_channel:do(Ch, #'basic.get'{queue = QName}),
-
-    %% Check the stats reflect that
-    Event2 = test_statistics_receive_event(
-               Ch,
-               fun (E) ->
-                       length(proplists:get_value(
-                                channel_queue_exchange_stats, E)) > 0
-               end),
-    [{QRes, [{get,1}]}] = proplists:get_value(channel_queue_stats,    Event2),
-    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2),
-    [{{QRes,X},[{publish,1}]}] =
-        proplists:get_value(channel_queue_exchange_stats, Event2),
-
-    %% Check the stats remove stuff on queue deletion
-    rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
-    Event3 = test_statistics_receive_event(
-               Ch,
-               fun (E) ->
-                       length(proplists:get_value(
-                                channel_queue_exchange_stats, E)) == 0
-               end),
-
-    [] = proplists:get_value(channel_queue_stats, Event3),
-    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3),
-    [] = proplists:get_value(channel_queue_exchange_stats, Event3),
-
-    rabbit_channel:shutdown(Ch),
-    rabbit_tests_event_receiver:stop(),
-    passed.
-
-test_refresh_events(SecondaryNode) ->
-    rabbit_tests_event_receiver:start(self(), [node(), SecondaryNode],
-                                      [channel_created, queue_created]),
-
-    {_Writer, Ch} = test_spawn(),
-    expect_events(pid, Ch, channel_created),
-    rabbit_channel:shutdown(Ch),
-
-    {_Writer2, Ch2} = test_spawn(SecondaryNode),
-    expect_events(pid, Ch2, channel_created),
-    rabbit_channel:shutdown(Ch2),
-
-    {new, #amqqueue{name = QName} = Q} =
-        rabbit_amqqueue:declare(test_queue(), false, false, [], none),
-    expect_events(name, QName, queue_created),
-    rabbit_amqqueue:delete(Q, false, false),
-
-    rabbit_tests_event_receiver:stop(),
-    passed.
-
-expect_events(Tag, Key, Type) ->
-    expect_event(Tag, Key, Type),
-    rabbit:force_event_refresh(make_ref()),
-    expect_event(Tag, Key, Type).
-
-expect_event(Tag, Key, Type) ->
-    receive #event{type = Type, props = Props} ->
-            case pget(Tag, Props) of
-                Key -> ok;
-                _   -> expect_event(Tag, Key, Type)
-            end
-    after ?TIMEOUT -> throw({failed_to_receive_event, Type})
-    end.
-
-test_delegates_async(SecondaryNode) ->
-    Self = self(),
-    Sender = fun (Pid) -> Pid ! {invoked, Self} end,
-
-    Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),
-
-    ok = delegate:invoke_no_result(spawn(Responder), Sender),
-    ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
-    await_response(2),
-
-    LocalPids = spawn_responders(node(), Responder, 10),
-    RemotePids = spawn_responders(SecondaryNode, Responder, 10),
-    ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender),
-    await_response(20),
-
-    passed.
-
-make_responder(FMsg) -> make_responder(FMsg, timeout).
-make_responder(FMsg, Throw) ->
-    fun () ->
-            receive Msg -> FMsg(Msg)
-            after ?TIMEOUT -> throw(Throw)
-            end
-    end.
-
-spawn_responders(Node, Responder, Count) ->
-    [spawn(Node, Responder) || _ <- lists:seq(1, Count)].
-
-await_response(0) ->
-    ok;
-await_response(Count) ->
-    receive
-        response -> ok,
-                    await_response(Count - 1)
-    after ?TIMEOUT -> throw(timeout)
-    end.
-
-must_exit(Fun) ->
-    try
-        Fun(),
-        throw(exit_not_thrown)
-    catch
-        exit:_ -> ok
-    end.
-
-test_delegates_sync(SecondaryNode) ->
-    Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
-    BadSender = fun (_Pid) -> exit(exception) end,
-
-    Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
-                                       gen_server:reply(From, response)
-                               end),
-
-    BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
-                                          gen_server:reply(From, response)
-                                  end, bad_responder_died),
-
-    response = delegate:invoke(spawn(Responder), Sender),
-    response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),
-
-    must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
-    must_exit(fun () ->
-                      delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),
-
-    LocalGoodPids = spawn_responders(node(), Responder, 2),
-    RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
-    LocalBadPids = spawn_responders(node(), BadResponder, 2),
-    RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),
-
-    {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
-    true = lists:all(fun ({_, response}) -> true end, GoodRes),
-    GoodResPids = [Pid || {Pid, _} <- GoodRes],
-
-    Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
-    Good = lists:usort(GoodResPids),
-
-    {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
-    true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
-    BadResPids = [Pid || {Pid, _} <- BadRes],
-
-    Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
-    Bad = lists:usort(BadResPids),
-
-    MagicalPids = [rabbit_misc:string_to_pid(Str) ||
-                      Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
-    {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
-    true = lists:all(
-             fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
-             BadNodes),
-    BadNodesPids = [Pid || {Pid, _} <- BadNodes],
-
-    Magical = lists:usort(MagicalPids),
-    Magical = lists:usort(BadNodesPids),
-
-    passed.
-
-test_queue_cleanup(_SecondaryNode) ->
-    {_Writer, Ch} = test_spawn(),
-    rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
-    receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
-            ok
-    after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
-    end,
-    rabbit_channel:shutdown(Ch),
-    rabbit:stop(),
-    rabbit:start(),
-    {_Writer2, Ch2} = test_spawn(),
-    rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
-                                             queue   = ?CLEANUP_QUEUE_NAME }),
-    receive
-        #'channel.close'{reply_code = ?NOT_FOUND} ->
-            ok
-    after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
-    end,
-    rabbit_channel:shutdown(Ch2),
-    passed.
-
-test_declare_on_dead_queue(SecondaryNode) ->
-    QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
-    Self = self(),
-    Pid = spawn(SecondaryNode,
-                fun () ->
-                        {new, #amqqueue{name = QueueName, pid = QPid}} =
-                            rabbit_amqqueue:declare(QueueName, false, false, [],
-                                                    none),
-                        exit(QPid, kill),
-                        Self ! {self(), killed, QPid}
-                end),
-    receive
-        {Pid, killed, QPid} ->
-            {existing, #amqqueue{name = QueueName,
-                                 pid = QPid}} =
-                rabbit_amqqueue:declare(QueueName, false, false, [], none),
-            false = rabbit_misc:is_process_alive(QPid),
-            {new, Q} = rabbit_amqqueue:declare(QueueName, false, false, [],
-                                               none),
-            true = rabbit_misc:is_process_alive(Q#amqqueue.pid),
-            {ok, 0} = rabbit_amqqueue:delete(Q, false, false),
-            passed
-    after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
-    end.
-
-%%---------------------------------------------------------------------
-
-control_action(Command, Args) ->
-    control_action(Command, node(), Args, default_options()).
-
-control_action(Command, Args, NewOpts) ->
-    control_action(Command, node(), Args,
-                   expand_options(default_options(), NewOpts)).
-
-control_action(Command, Node, Args, Opts) ->
-    case catch rabbit_control_main:action(
-                 Command, Node, Args, Opts,
-                 fun (Format, Args1) ->
-                         io:format(Format ++ " ...~n", Args1)
-                 end) of
-        ok ->
-            io:format("done.~n"),
-            ok;
-        Other ->
-            io:format("failed.~n"),
-            Other
-    end.
-
-control_action_opts(Raw) ->
-    NodeStr = atom_to_list(node()),
-    case rabbit_control_main:parse_arguments(Raw, NodeStr) of
-        {ok, {Cmd, Opts, Args}} ->
-            case control_action(Cmd, node(), Args, Opts) of
-                ok -> ok;
-                _  -> error
-            end;
-        _ ->
-            error
-    end.
-
-info_action(Command, Args, CheckVHost) ->
-    ok = control_action(Command, []),
-    if CheckVHost -> ok = control_action(Command, [], ["-p", "/"]);
-       true       -> ok
-    end,
-    ok = control_action(Command, lists:map(fun atom_to_list/1, Args)),
-    {bad_argument, dummy} = control_action(Command, ["dummy"]),
-    ok.
-
-default_options() -> [{"-p", "/"}, {"-q", "false"}].
-
-expand_options(As, Bs) ->
-    lists:foldl(fun({K, _}=A, R) ->
-                        case proplists:is_defined(K, R) of
-                            true -> R;
-                            false -> [A | R]
-                        end
-                end, Bs, As).
-
-check_parse_arguments(ExpRes, Fun, As) ->
-    SortRes =
-        fun (no_command)          -> no_command;
-            ({ok, {C, KVs, As1}}) -> {ok, {C, lists:sort(KVs), As1}}
-        end,
-
-    true = SortRes(ExpRes) =:= SortRes(Fun(As)).
-
-empty_files(Files) ->
-    [case file:read_file_info(File) of
-         {ok, FInfo} -> FInfo#file_info.size == 0;
-         Error       -> Error
-     end || File <- Files].
-
-non_empty_files(Files) ->
-    [case EmptyFile of
-         {error, Reason} -> {error, Reason};
-         _               -> not(EmptyFile)
-     end || EmptyFile <- empty_files(Files)].
-
-test_logs_working(MainLogFile, SaslLogFile) ->
-    ok = rabbit_log:error("foo bar"),
-    ok = error_logger:error_report(crash_report, [foo, bar]),
-    %% give the error loggers some time to catch up
-    timer:sleep(100),
-    [true, true] = non_empty_files([MainLogFile, SaslLogFile]),
-    ok.
-
-set_permissions(Path, Mode) ->
-    case file:read_file_info(Path) of
-        {ok, FInfo} -> file:write_file_info(
-                         Path,
-                         FInfo#file_info{mode=Mode});
-        Error       -> Error
-    end.
-
-clean_logs(Files, Suffix) ->
-    [begin
-         ok = delete_file(File),
-         ok = delete_file([File, Suffix])
-     end || File <- Files],
-    ok.
-
-assert_ram_node() ->
-    case rabbit_mnesia:node_type() of
-        disc -> exit('not_ram_node');
-        ram  -> ok
-    end.
-
-assert_disc_node() ->
-    case rabbit_mnesia:node_type() of
-        disc -> ok;
-        ram  -> exit('not_disc_node')
-    end.
-
-delete_file(File) ->
-    case file:delete(File) of
-        ok              -> ok;
-        {error, enoent} -> ok;
-        Error           -> Error
-    end.
-
-make_files_non_writable(Files) ->
-    [ok = file:write_file_info(File, #file_info{mode=0}) ||
-        File <- Files],
-    ok.
-
-add_log_handlers(Handlers) ->
-    [ok = error_logger:add_report_handler(Handler, Args) ||
-        {Handler, Args} <- Handlers],
-    ok.
-
-delete_log_handlers(Handlers) ->
-    [[] = error_logger:delete_report_handler(Handler) ||
-        Handler <- Handlers],
-    ok.
-
-test_supervisor_delayed_restart() ->
-    test_sup:test_supervisor_delayed_restart().
-
-test_file_handle_cache() ->
-    %% test copying when there is just one spare handle
-    Limit = file_handle_cache:get_limit(),
-    ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
-    TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
-    ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
-    [Src1, Dst1, Src2, Dst2] = Files =
-        [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
-    Content = <<"foo">>,
-    CopyFun = fun (Src, Dst) ->
-                      {ok, Hdl} = prim_file:open(Src, [binary, write]),
-                      ok = prim_file:write(Hdl, Content),
-                      ok = prim_file:sync(Hdl),
-                      prim_file:close(Hdl),
-
-                      {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
-                      {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
-                      Size = size(Content),
-                      {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
-                      ok = file_handle_cache:delete(SrcHdl),
-                      ok = file_handle_cache:delete(DstHdl)
-              end,
-    Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
-                                        filename:join(TmpDir, "file5"),
-                                        [write], []),
-                          receive {next, Pid1} -> Pid1 ! {next, self()} end,
-                          file_handle_cache:delete(Hdl),
-                          %% This will block and never return, so we
-                          %% exercise the fhc tidying up the pending
-                          %% queue on the death of a process.
-                          ok = CopyFun(Src1, Dst1)
-                end),
-    ok = CopyFun(Src1, Dst1),
-    ok = file_handle_cache:set_limit(2),
-    Pid ! {next, self()},
-    receive {next, Pid} -> ok end,
-    timer:sleep(100),
-    Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
-    timer:sleep(100),
-    erlang:monitor(process, Pid),
-    erlang:monitor(process, Pid1),
-    exit(Pid, kill),
-    exit(Pid1, kill),
-    receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
-    receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
-    [file:delete(File) || File <- Files],
-    ok = file_handle_cache:set_limit(Limit),
-    passed.
-
-test_backing_queue() ->
-    case application:get_env(rabbit, backing_queue_module) of
-        {ok, rabbit_variable_queue} ->
-            {ok, FileSizeLimit} =
-                application:get_env(rabbit, msg_store_file_size_limit),
-            application:set_env(rabbit, msg_store_file_size_limit, 512,
-                                infinity),
-            {ok, MaxJournal} =
-                application:get_env(rabbit, queue_index_max_journal_entries),
-            application:set_env(rabbit, queue_index_max_journal_entries, 128,
-                                infinity),
-            passed = test_msg_store(),
-            application:set_env(rabbit, msg_store_file_size_limit,
-                                FileSizeLimit, infinity),
-            passed = test_queue_index(),
-            passed = test_queue_index_props(),
-            passed = test_variable_queue(),
-            passed = test_variable_queue_delete_msg_store_files_callback(),
-            passed = test_queue_recover(),
-            application:set_env(rabbit, queue_index_max_journal_entries,
-                                MaxJournal, infinity),
-            %% We will have restarted the message store, and thus changed
-            %% the order of the children of rabbit_sup. This will cause
-            %% problems if there are subsequent failures - see bug 24262.
-            ok = restart_app(),
-            passed;
-        _ ->
-            passed
-    end.
-
-restart_msg_store_empty() ->
-    ok = rabbit_variable_queue:stop_msg_store(),
-    ok = rabbit_variable_queue:start_msg_store(
-           undefined, {fun (ok) -> finished end, ok}).
-
-msg_id_bin(X) ->
-    erlang:md5(term_to_binary(X)).
-
-msg_store_client_init(MsgStore, Ref) ->
-    rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
-
-on_disk_capture() ->
-    receive
-        {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid);
-        stop                 -> done
-    end.
-
-on_disk_capture([_|_], _Awaiting, Pid) ->
-    Pid ! {self(), surplus};
-on_disk_capture(OnDisk, Awaiting, Pid) ->
-    receive
-        {on_disk, MsgIdsS} ->
-            MsgIds = gb_sets:to_list(MsgIdsS),
-            on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
-                            Pid);
-        stop ->
-            done
-    after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
-            case Awaiting of
-                [] -> Pid ! {self(), arrived}, on_disk_capture();
-                _  -> Pid ! {self(), timeout}
-            end
-    end.
-
-on_disk_await(Pid, MsgIds) when is_list(MsgIds) ->
-    Pid ! {await, MsgIds, self()},
-    receive
-        {Pid, arrived} -> ok;
-        {Pid, Error}   -> Error
-    end.
-
-on_disk_stop(Pid) ->
-    MRef = erlang:monitor(process, Pid),
-    Pid ! stop,
-    receive {'DOWN', MRef, process, Pid, _Reason} ->
-            ok
-    end.
-
-msg_store_client_init_capture(MsgStore, Ref) ->
-    Pid = spawn(fun on_disk_capture/0),
-    {Pid, rabbit_msg_store:client_init(
-            MsgStore, Ref, fun (MsgIds, _ActionTaken) ->
-                                   Pid ! {on_disk, MsgIds}
-                           end, undefined)}.
-
-msg_store_contains(Atom, MsgIds, MSCState) ->
-    Atom = lists:foldl(
-             fun (MsgId, Atom1) when Atom1 =:= Atom ->
-                     rabbit_msg_store:contains(MsgId, MSCState) end,
-             Atom, MsgIds).
-
-msg_store_read(MsgIds, MSCState) ->
-    lists:foldl(fun (MsgId, MSCStateM) ->
-                        {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read(
-                                                     MsgId, MSCStateM),
-                        MSCStateN
-                end, MSCState, MsgIds).
-
-msg_store_write(MsgIds, MSCState) ->
-    ok = lists:foldl(fun (MsgId, ok) ->
-                             rabbit_msg_store:write(MsgId, MsgId, MSCState)
-                     end, ok, MsgIds).
-
-msg_store_remove(MsgIds, MSCState) ->
-    rabbit_msg_store:remove(MsgIds, MSCState).
-
-msg_store_remove(MsgStore, Ref, MsgIds) ->
-    with_msg_store_client(MsgStore, Ref,
-                          fun (MSCStateM) ->
-                                  ok = msg_store_remove(MsgIds, MSCStateM),
-                                  MSCStateM
-                          end).
-
-with_msg_store_client(MsgStore, Ref, Fun) ->
-    rabbit_msg_store:client_terminate(
-      Fun(msg_store_client_init(MsgStore, Ref))).
-
-foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
-    rabbit_msg_store:client_terminate(
-      lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end,
-                  msg_store_client_init(MsgStore, Ref), L)).
-
-test_msg_store() ->
-    restart_msg_store_empty(),
-    MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
-    {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
-    Ref = rabbit_guid:gen(),
-    {Cap, MSCState} = msg_store_client_init_capture(
-                        ?PERSISTENT_MSG_STORE, Ref),
-    Ref2 = rabbit_guid:gen(),
-    {Cap2, MSC2State} = msg_store_client_init_capture(
-                          ?PERSISTENT_MSG_STORE, Ref2),
-    %% check we don't contain any of the msgs we're about to publish
-    false = msg_store_contains(false, MsgIds, MSCState),
-    %% test confirm logic
-    passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
-    %% check we don't contain any of the msgs we're about to publish
-    false = msg_store_contains(false, MsgIds, MSCState),
-    %% publish the first half
-    ok = msg_store_write(MsgIds1stHalf, MSCState),
-    %% sync on the first half
-    ok = on_disk_await(Cap, MsgIds1stHalf),
-    %% publish the second half
-    ok = msg_store_write(MsgIds2ndHalf, MSCState),
-    %% check they're all in there
-    true = msg_store_contains(true, MsgIds, MSCState),
-    %% publish the latter half twice so we hit the caching and ref
-    %% count code. We need to do this through a 2nd client since a
-    %% single client is not supposed to write the same message more
-    %% than once without first removing it.
-    ok = msg_store_write(MsgIds2ndHalf, MSC2State),
-    %% check they're still all in there
-    true = msg_store_contains(true, MsgIds, MSCState),
-    %% sync on the 2nd half
-    ok = on_disk_await(Cap2, MsgIds2ndHalf),
-    %% cleanup
-    ok = on_disk_stop(Cap2),
-    ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
-    ok = on_disk_stop(Cap),
-    %% read them all
-    MSCState1 = msg_store_read(MsgIds, MSCState),
-    %% read them all again - this will hit the cache, not disk
-    MSCState2 = msg_store_read(MsgIds, MSCState1),
-    %% remove them all
-    ok = msg_store_remove(MsgIds, MSCState2),
-    %% check first half doesn't exist
-    false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
-    %% check second half does exist
-    true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
-    %% read the second half again
-    MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
-    %% read the second half again, just for fun (aka code coverage)
-    MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
-    ok = rabbit_msg_store:client_terminate(MSCState4),
-    %% stop and restart, preserving every other msg in 2nd half
-    ok = rabbit_variable_queue:stop_msg_store(),
-    ok = rabbit_variable_queue:start_msg_store(
-           [], {fun ([]) -> finished;
-                    ([MsgId|MsgIdsTail])
-                      when length(MsgIdsTail) rem 2 == 0 ->
-                        {MsgId, 1, MsgIdsTail};
-                    ([MsgId|MsgIdsTail]) ->
-                        {MsgId, 0, MsgIdsTail}
-                end, MsgIds2ndHalf}),
-    MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
-    %% check we have the right msgs left
-    lists:foldl(
-      fun (MsgId, Bool) ->
-              not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
-      end, false, MsgIds2ndHalf),
-    ok = rabbit_msg_store:client_terminate(MSCState5),
-    %% restart empty
-    restart_msg_store_empty(),
-    MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
-    %% check we don't contain any of the msgs
-    false = msg_store_contains(false, MsgIds, MSCState6),
-    %% publish the first half again
-    ok = msg_store_write(MsgIds1stHalf, MSCState6),
-    %% this should force some sort of sync internally, otherwise we'd misread
-    ok = rabbit_msg_store:client_terminate(
-           msg_store_read(MsgIds1stHalf, MSCState6)),
-    MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
-    ok = msg_store_remove(MsgIds1stHalf, MSCState7),
-    ok = rabbit_msg_store:client_terminate(MSCState7),
-    %% restart empty
-    restart_msg_store_empty(), %% now safe to reuse msg_ids
-    %% push a lot of msgs in... at least 100 files worth
-    {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
-    PayloadSizeBits = 65536,
-    BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
-    MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
-    Payload = << 0:PayloadSizeBits >>,
-    ok = with_msg_store_client(
-           ?PERSISTENT_MSG_STORE, Ref,
-           fun (MSCStateM) ->
-                   [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
-                       MsgId <- MsgIdsBig],
-                   MSCStateM
-           end),
-    %% now read them to ensure we hit the fast client-side reading
-    ok = foreach_with_msg_store_client(
-           ?PERSISTENT_MSG_STORE, Ref,
-           fun (MsgId, MSCStateM) ->
-                   {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
-                                                  MsgId, MSCStateM),
-                   MSCStateN
-           end, MsgIdsBig),
-    %% .., then remove 3s by 1, from the young end first...
-    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
-                          [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
-    %% .., then remove 3s by 2, from the young end first. This hits
-    %% GC (under 50% good data left but no empty files, so it must GC).
-    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
-                          [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
-    %% .., then remove 3s by 3, from the young end first. This hits
-    %% GC...
-    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
-                          [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
-    %% ensure empty
-    ok = with_msg_store_client(
-           ?PERSISTENT_MSG_STORE, Ref,
-           fun (MSCStateM) ->
-                   false = msg_store_contains(false, MsgIdsBig, MSCStateM),
-                   MSCStateM
-           end),
-    %%
-    passed = test_msg_store_client_delete_and_terminate(),
-    %% restart empty
-    restart_msg_store_empty(),
-    passed.
-
-test_msg_store_confirms(MsgIds, Cap, MSCState) ->
-    %% write -> confirmed
-    ok = msg_store_write(MsgIds, MSCState),
-    ok = on_disk_await(Cap, MsgIds),
-    %% remove -> _
-    ok = msg_store_remove(MsgIds, MSCState),
-    ok = on_disk_await(Cap, []),
-    %% write, remove -> confirmed
-    ok = msg_store_write(MsgIds, MSCState),
-    ok = msg_store_remove(MsgIds, MSCState),
-    ok = on_disk_await(Cap, MsgIds),
-    %% write, remove, write -> confirmed, confirmed
-    ok = msg_store_write(MsgIds, MSCState),
-    ok = msg_store_remove(MsgIds, MSCState),
-    ok = msg_store_write(MsgIds, MSCState),
-    ok = on_disk_await(Cap, MsgIds ++ MsgIds),
-    %% remove, write -> confirmed
-    ok = msg_store_remove(MsgIds, MSCState),
-    ok = msg_store_write(MsgIds, MSCState),
-    ok = on_disk_await(Cap, MsgIds),
-    %% remove, write, remove -> confirmed
-    ok = msg_store_remove(MsgIds, MSCState),
-    ok = msg_store_write(MsgIds, MSCState),
-    ok = msg_store_remove(MsgIds, MSCState),
-    ok = on_disk_await(Cap, MsgIds),
-    %% confirmation on timer-based sync
-    passed = test_msg_store_confirm_timer(),
-    passed.
-
-test_msg_store_confirm_timer() ->
-    Ref = rabbit_guid:gen(),
-    MsgId  = msg_id_bin(1),
-    Self = self(),
-    MSCState = rabbit_msg_store:client_init(
-                 ?PERSISTENT_MSG_STORE, Ref,
-                 fun (MsgIds, _ActionTaken) ->
-                         case gb_sets:is_member(MsgId, MsgIds) of
-                             true  -> Self ! on_disk;
-                             false -> ok
-                         end
-                 end, undefined),
-    ok = msg_store_write([MsgId], MSCState),
-    ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState),
-    ok = msg_store_remove([MsgId], MSCState),
-    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
-    passed.
-
-msg_store_keep_busy_until_confirm(MsgIds, MSCState) ->
-    receive
-        on_disk -> ok
-    after 0 ->
-            ok = msg_store_write(MsgIds, MSCState),
-            ok = msg_store_remove(MsgIds, MSCState),
-            msg_store_keep_busy_until_confirm(MsgIds, MSCState)
-    end.
-
-test_msg_store_client_delete_and_terminate() ->
-    restart_msg_store_empty(),
-    MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)],
-    Ref = rabbit_guid:gen(),
-    MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
-    ok = msg_store_write(MsgIds, MSCState),
-    %% test the 'dying client' fast path for writes
-    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
-    passed.
-
-queue_name(Name) ->
-    rabbit_misc:r(<<"/">>, queue, Name).
-
-test_queue() ->
-    queue_name(<<"test">>).
-
-init_test_queue() ->
-    TestQueue = test_queue(),
-    PRef = rabbit_guid:gen(),
-    PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
-    Res = rabbit_queue_index:recover(
-            TestQueue, [], false,
-            fun (MsgId) ->
-                    rabbit_msg_store:contains(MsgId, PersistentClient)
-            end,
-            fun nop/1),
-    ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
-    Res.
-
-restart_test_queue(Qi) ->
-    _ = rabbit_queue_index:terminate([], Qi),
-    ok = rabbit_variable_queue:stop(),
-    {ok, _} = rabbit_variable_queue:start([test_queue()]),
-    init_test_queue().
-
-empty_test_queue() ->
-    ok = rabbit_variable_queue:stop(),
-    {ok, _} = rabbit_variable_queue:start([]),
-    {0, Qi} = init_test_queue(),
-    _ = rabbit_queue_index:delete_and_terminate(Qi),
-    ok.
-
-with_empty_test_queue(Fun) ->
-    ok = empty_test_queue(),
-    {0, Qi} = init_test_queue(),
-    rabbit_queue_index:delete_and_terminate(Fun(Qi)).
-
-restart_app() ->
-    rabbit:stop(),
-    rabbit:start().
-
-queue_index_publish(SeqIds, Persistent, Qi) ->
-    Ref = rabbit_guid:gen(),
-    MsgStore = case Persistent of
-                   true  -> ?PERSISTENT_MSG_STORE;
-                   false -> ?TRANSIENT_MSG_STORE
-               end,
-    MSCState = msg_store_client_init(MsgStore, Ref),
-    {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
-        lists:foldl(
-          fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
-                  MsgId = rabbit_guid:gen(),
-                  QiM = rabbit_queue_index:publish(
-                          MsgId, SeqId, #message_properties{}, Persistent, QiN),
-                  ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
-                  {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
-          end, {Qi, []}, SeqIds),
-    %% do this just to force all of the publishes through to the msg_store:
-    true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
-    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
-    {A, B}.
-
-verify_read_with_published(_Delivered, _Persistent, [], _) ->
-    ok;
-verify_read_with_published(Delivered, Persistent,
-                           [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
-                           [{SeqId, MsgId}|Published]) ->
-    verify_read_with_published(Delivered, Persistent, Read, Published);
-verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
-    ko.
-
-test_queue_index_props() ->
-    with_empty_test_queue(
-      fun(Qi0) ->
-              MsgId = rabbit_guid:gen(),
-              Props = #message_properties{expiry=12345},
-              Qi1 = rabbit_queue_index:publish(MsgId, 1, Props, true, Qi0),
-              {[{MsgId, 1, Props, _, _}], Qi2} =
-                  rabbit_queue_index:read(1, 2, Qi1),
-              Qi2
-      end),
-
-    ok = rabbit_variable_queue:stop(),
-    {ok, _} = rabbit_variable_queue:start([]),
-
-    passed.
-
-test_queue_index() ->
-    SegmentSize = rabbit_queue_index:next_segment_boundary(0),
-    TwoSegs = SegmentSize + SegmentSize,
-    MostOfASegment = trunc(SegmentSize*0.75),
-    SeqIdsA = lists:seq(0, MostOfASegment-1),
-    SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
-    SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
-    SeqIdsD = lists:seq(0, SegmentSize*4),
-
-    with_empty_test_queue(
-      fun (Qi0) ->
-              {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
-              {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
-              {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
-              {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
-              ok = verify_read_with_published(false, false, ReadA,
-                                              lists:reverse(SeqIdsMsgIdsA)),
-              %% should get length back as 0, as all the msgs were transient
-              {0, Qi6} = restart_test_queue(Qi4),
-              {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
-              {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
-              {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
-              {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
-              ok = verify_read_with_published(false, true, ReadB,
-                                              lists:reverse(SeqIdsMsgIdsB)),
-              %% should get length back as MostOfASegment
-              LenB = length(SeqIdsB),
-              {LenB, Qi12} = restart_test_queue(Qi10),
-              {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
-              Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
-              {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
-              ok = verify_read_with_published(true, true, ReadC,
-                                              lists:reverse(SeqIdsMsgIdsB)),
-              Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
-              Qi17 = rabbit_queue_index:flush(Qi16),
-              %% Everything will have gone now because #pubs == #acks
-              {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
-              %% should get length back as 0 because all persistent
-              %% msgs have been acked
-              {0, Qi19} = restart_test_queue(Qi18),
-              Qi19
-      end),
-
-    %% These next bits are just to hit the auto deletion of segment files.
-    %% First, partials:
-    %% a) partial pub+del+ack, then move to new segment
-    with_empty_test_queue(
-      fun (Qi0) ->
-              {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
-                                                         false, Qi0),
-              Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
-              Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
-              Qi4 = rabbit_queue_index:flush(Qi3),
-              {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
-                                                          false, Qi4),
-              Qi5
-      end),
-
-    %% b) partial pub+del, then move to new segment, then ack all in old segment
-    with_empty_test_queue(
-      fun (Qi0) ->
-              {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
-                                                          false, Qi0),
-              Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
-              {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
-                                                          false, Qi2),
-              Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
-              rabbit_queue_index:flush(Qi4)
-      end),
-
-    %% c) just fill up several segments of all pubs, then +dels, then +acks
-    with_empty_test_queue(
-      fun (Qi0) ->
-              {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
-                                                         false, Qi0),
-              Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
-              Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
-              rabbit_queue_index:flush(Qi3)
-      end),
-
-    %% d) get messages in all states to a segment, then flush, then do
-    %% the same again, don't flush and read. This will hit all
-    %% possibilities in combining the segment with the journal.
-    with_empty_test_queue(
-      fun (Qi0) ->
-              {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
-                                                               false, Qi0),
-              Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
-              Qi3 = rabbit_queue_index:ack([0], Qi2),
-              Qi4 = rabbit_queue_index:flush(Qi3),
-              {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
-              Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
-              Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
-              {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
-              {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
-              ok = verify_read_with_published(true, false, ReadD,
-                                              [Four, Five, Six]),
-              {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
-              ok = verify_read_with_published(false, false, ReadE,
-                                              [Seven, Eight]),
-              Qi10
-      end),
-
-    %% e) as for (d), but use terminate instead of read, which will
-    %% exercise journal_minus_segment, not segment_plus_journal.
-    with_empty_test_queue(
-      fun (Qi0) ->
-              {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
-                                                         true, Qi0),
-              Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
-              Qi3 = rabbit_queue_index:ack([0], Qi2),
-              {5, Qi4} = restart_test_queue(Qi3),
-              {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
-              Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
-              Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
-              {5, Qi8} = restart_test_queue(Qi7),
-              Qi8
-      end),
-
-    ok = rabbit_variable_queue:stop(),
-    {ok, _} = rabbit_variable_queue:start([]),
-
-    passed.
-
-variable_queue_init(Q, Recover) ->
-    rabbit_variable_queue:init(
-      Q, case Recover of
-             true  -> non_clean_shutdown;
-             false -> new
-         end, fun nop/2, fun nop/2, fun nop/1).
-
-variable_queue_publish(IsPersistent, Count, VQ) ->
-    variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
-
-variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
-    variable_queue_publish(IsPersistent, 1, Count, PropFun,
-                           fun (_N) -> <<>> end, VQ).
-
-variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
-    variable_queue_wait_for_shuffling_end(
-      lists:foldl(
-        fun (N, VQN) ->
-                rabbit_variable_queue:publish(
-                  rabbit_basic:message(
-                    rabbit_misc:r(<<>>, exchange, <<>>),
-                    <<>>, #'P_basic'{delivery_mode = case IsPersistent of
-                                                         true  -> 2;
-                                                         false -> 1
-                                                     end},
-                    PayloadFun(N)),
-                  PropFun(N, #message_properties{}), false, self(), VQN)
-        end, VQ, lists:seq(Start, Start + Count - 1))).
-
-variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
-    lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
-                        Rem = Len - N,
-                        {{#basic_message { is_persistent = IsPersistent },
-                          IsDelivered, AckTagN}, VQM} =
-                            rabbit_variable_queue:fetch(true, VQN),
-                        Rem = rabbit_variable_queue:len(VQM),
-                        {VQM, [AckTagN | AckTagsAcc]}
-                end, {VQ, []}, lists:seq(1, Count)).
-
-variable_queue_set_ram_duration_target(Duration, VQ) ->
-    variable_queue_wait_for_shuffling_end(
-      rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
-
-assert_prop(List, Prop, Value) ->
-    Value = proplists:get_value(Prop, List).
-
-assert_props(List, PropVals) ->
-    [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
-
-test_amqqueue(Durable) ->
-    (rabbit_amqqueue:pseudo_queue(test_queue(), self()))
-        #amqqueue { durable = Durable }.
-
-with_fresh_variable_queue(Fun) ->
-    Ref = make_ref(),
-    Me = self(),
-    %% Run in a separate process since rabbit_msg_store will send
-    %% bump_credit messages and we want to ignore them
-    spawn_link(fun() ->
-                       ok = empty_test_queue(),
-                       VQ = variable_queue_init(test_amqqueue(true), false),
-                       S0 = rabbit_variable_queue:status(VQ),
-                       assert_props(S0, [{q1, 0}, {q2, 0},
-                                         {delta,
-                                          {delta, undefined, 0, undefined}},
-                                         {q3, 0}, {q4, 0},
-                                         {len, 0}]),
-                       _ = rabbit_variable_queue:delete_and_terminate(
-                        shutdown, Fun(VQ)),
-                       Me ! Ref
-               end),
-    receive
-        Ref -> ok
-    end,
-    passed.
-
-publish_and_confirm(Q, Payload, Count) ->
-    Seqs = lists:seq(1, Count),
-    [begin
-         Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
-                                    <<>>, #'P_basic'{delivery_mode = 2},
-                                    Payload),
-         Delivery = #delivery{mandatory = false, sender = self(),
-                              confirm = true, message = Msg, msg_seq_no = Seq},
-          _QPids = rabbit_amqqueue:deliver([Q], Delivery)
-     end || Seq <- Seqs],
-    wait_for_confirms(gb_sets:from_list(Seqs)).
-
-wait_for_confirms(Unconfirmed) ->
-    case gb_sets:is_empty(Unconfirmed) of
-        true  -> ok;
-        false -> receive {'$gen_cast', {confirm, Confirmed, _}} ->
-                         wait_for_confirms(
-                           rabbit_misc:gb_sets_difference(
-                             Unconfirmed, gb_sets:from_list(Confirmed)))
-                 after ?TIMEOUT -> exit(timeout_waiting_for_confirm)
-                 end
-    end.
-
-test_variable_queue() ->
-    [passed = with_fresh_variable_queue(F) ||
-        F <- [fun test_variable_queue_dynamic_duration_change/1,
-              fun test_variable_queue_partial_segments_delta_thing/1,
-              fun test_variable_queue_all_the_bits_not_covered_elsewhere1/1,
-              fun test_variable_queue_all_the_bits_not_covered_elsewhere2/1,
-              fun test_drop/1,
-              fun test_variable_queue_fold_msg_on_disk/1,
-              fun test_dropfetchwhile/1,
-              fun test_dropwhile_varying_ram_duration/1,
-              fun test_fetchwhile_varying_ram_duration/1,
-              fun test_variable_queue_ack_limiting/1,
-              fun test_variable_queue_purge/1,
-              fun test_variable_queue_requeue/1,
-              fun test_variable_queue_requeue_ram_beta/1,
-              fun test_variable_queue_fold/1]],
-    passed.
-
-test_variable_queue_fold(VQ0) ->
-    {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
-        variable_queue_with_holes(VQ0),
-    Count = rabbit_variable_queue:depth(VQ1),
-    Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
-    lists:foldl(fun (Cut, VQ2) ->
-                        test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
-                end, VQ1, [0, 1, 2, Count div 2,
-                           Count - 1, Count, Count + 1, Count * 2]).
-
-test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
-    {Acc, VQ1} = rabbit_variable_queue:fold(
-                   fun (M, _, Pending, A) ->
-                           MInt = msg2int(M),
-                           Pending = lists:member(MInt, PendingMsgs), %% assert
-                           case MInt =< Cut of
-                               true  -> {cont, [MInt | A]};
-                               false -> {stop, A}
-                           end
-                   end, [], VQ0),
-    Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
-    Expected = lists:reverse(Acc), %% assertion
-    VQ1.
-
-msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
-    binary_to_term(list_to_binary(lists:reverse(P))).
-
-ack_subset(AckSeqs, Interval, Rem) ->
-    lists:filter(fun ({_Ack, N}) -> (N + Rem) rem Interval == 0 end, AckSeqs).
-
-requeue_one_by_one(Acks, VQ) ->
-    lists:foldl(fun (AckTag, VQN) ->
-                        {_MsgId, VQM} = rabbit_variable_queue:requeue(
-                                          [AckTag], VQN),
-                        VQM
-                end, VQ, Acks).
-
-%% Create a vq with messages in q1, delta, and q3, and holes (in the
-%% form of pending acks) in the latter two.
-variable_queue_with_holes(VQ0) ->
-    Interval = 2048, %% should match vq:IO_BATCH_SIZE
-    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
-    Seq = lists:seq(1, Count),
-    VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
-    VQ2 = variable_queue_publish(
-            false, 1, Count,
-            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
-    {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
-    Acks = lists:reverse(AcksR),
-    AckSeqs = lists:zip(Acks, Seq),
-    [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
-        [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
-    %% we requeue in three phases in order to exercise requeuing logic
-    %% in various vq states
-    {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
-                       Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
-    VQ5 = requeue_one_by_one(Subset1, VQ4),
-    %% by now we have some messages (and holes) in delta
-    VQ6 = requeue_one_by_one(Subset2, VQ5),
-    VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
-    %% add the q1 tail
-    VQ8 = variable_queue_publish(
-            true, Count + 1, Interval,
-            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
-    %% assertions
-    [false = case V of
-                 {delta, _, 0, _} -> true;
-                 0                -> true;
-                 _                -> false
-             end || {K, V} <- rabbit_variable_queue:status(VQ8),
-                    lists:member(K, [q1, delta, q3])],
-    Depth = Count + Interval,
-    Depth = rabbit_variable_queue:depth(VQ8),
-    Len = Depth - length(Subset3),
-    Len = rabbit_variable_queue:len(VQ8),
-    {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
-
-test_variable_queue_requeue(VQ0) ->
-    {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
-        variable_queue_with_holes(VQ0),
-    Msgs =
-        lists:zip(RequeuedMsgs,
-                  lists:duplicate(length(RequeuedMsgs), true)) ++
-        lists:zip(FreshMsgs,
-                  lists:duplicate(length(FreshMsgs), false)),
-    VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
-                              {{M, MRequeued, _}, VQb} =
-                                  rabbit_variable_queue:fetch(true, VQa),
-                              Requeued = MRequeued, %% assertion
-                              I = msg2int(M),       %% assertion
-                              VQb
-                      end, VQ1, Msgs),
-    {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
-    VQ3.
-
-%% requeue from ram_pending_ack into q3, move to delta and then empty queue
-test_variable_queue_requeue_ram_beta(VQ0) ->
-    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
-    VQ1 = variable_queue_publish(false, Count, VQ0),
-    {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
-    {Back, Front} = lists:split(Count div 2, AcksR),
-    {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
-    VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
-    {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
-    VQ6 = requeue_one_by_one(Front, VQ5),
-    {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
-    {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
-    VQ8.
-
-test_variable_queue_purge(VQ0) ->
-    LenDepth = fun (VQ) ->
-                       {rabbit_variable_queue:len(VQ),
-                        rabbit_variable_queue:depth(VQ)}
-               end,
-    VQ1         = variable_queue_publish(false, 10, VQ0),
-    {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
-    {4, VQ3}    = rabbit_variable_queue:purge(VQ2),
-    {0, 6}      = LenDepth(VQ3),
-    {_, VQ4}    = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
-    {2, 6}      = LenDepth(VQ4),
-    VQ5         = rabbit_variable_queue:purge_acks(VQ4),
-    {2, 2}      = LenDepth(VQ5),
-    VQ5.
-
-test_variable_queue_ack_limiting(VQ0) ->
-    %% start by sending in a bunch of messages
-    Len = 1024,
-    VQ1 = variable_queue_publish(false, Len, VQ0),
-
-    %% squeeze and relax queue
-    Churn = Len div 32,
-    VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
-
-    %% update stats for duration
-    {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
-
-    %% fetch half the messages
-    {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
-
-    VQ5 = check_variable_queue_status(VQ4, [{len          , Len div 2},
-                                            {ram_ack_count, Len div 2},
-                                            {ram_msg_count, Len div 2}]),
-
-    %% ensure all acks go to disk on 0 duration target
-    VQ6 = check_variable_queue_status(
-            variable_queue_set_ram_duration_target(0, VQ5),
-            [{len, Len div 2},
-             {target_ram_count, 0},
-             {ram_msg_count, 0},
-             {ram_ack_count, 0}]),
-
-    VQ6.
-
-test_drop(VQ0) ->
-    %% start by sending a message
-    VQ1 = variable_queue_publish(false, 1, VQ0),
-    %% drop message with AckRequired = true
-    {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
-    true = rabbit_variable_queue:is_empty(VQ2),
-    true = AckTag =/= undefined,
-    %% drop again -> empty
-    {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
-    %% requeue
-    {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
-    %% drop message with AckRequired = false
-    {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
-    true = rabbit_variable_queue:is_empty(VQ5),
-    VQ5.
-
-test_dropfetchwhile(VQ0) ->
-    Count = 10,
-
-    %% add messages with sequential expiry
-    VQ1 = variable_queue_publish(
-            false, 1, Count,
-            fun (N, Props) -> Props#message_properties{expiry = N} end,
-            fun erlang:term_to_binary/1, VQ0),
-
-    %% fetch the first 5 messages
-    {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
-        rabbit_variable_queue:fetchwhile(
-          fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
-          fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
-                  {[Msg | MsgAcc], [AckTag | AckAcc]}
-          end, {[], []}, VQ1),
-    true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
-
-    %% requeue them
-    {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
-
-    %% drop the first 5 messages
-    {#message_properties{expiry = 6}, VQ4} =
-        rabbit_variable_queue:dropwhile(
-          fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),
-
-    %% fetch 5
-    VQ5 = lists:foldl(fun (N, VQN) ->
-                              {{Msg, _, _}, VQM} =
-                                  rabbit_variable_queue:fetch(false, VQN),
-                              true = msg2int(Msg) == N,
-                              VQM
-                      end, VQ4, lists:seq(6, Count)),
-
-    %% should be empty now
-    true = rabbit_variable_queue:is_empty(VQ5),
-
-    VQ5.
-
-test_dropwhile_varying_ram_duration(VQ0) ->
-    test_dropfetchwhile_varying_ram_duration(
-      fun (VQ1) ->
-              {_, VQ2} = rabbit_variable_queue:dropwhile(
-                           fun (_) -> false end, VQ1),
-              VQ2
-      end, VQ0).
-
-test_fetchwhile_varying_ram_duration(VQ0) ->
-    test_dropfetchwhile_varying_ram_duration(
-      fun (VQ1) ->
-              {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
-                               fun (_) -> false end,
-                               fun (_, _, A) -> A end,
-                               ok, VQ1),
-              VQ2
-      end, VQ0).
-
-test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
-    VQ1 = variable_queue_publish(false, 1, VQ0),
-    VQ2 = variable_queue_set_ram_duration_target(0, VQ1),
-    VQ3 = Fun(VQ2),
-    VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
-    VQ5 = variable_queue_publish(false, 1, VQ4),
-    VQ6 = Fun(VQ5),
-    VQ6.
-
-test_variable_queue_dynamic_duration_change(VQ0) ->
-    SegmentSize = rabbit_queue_index:next_segment_boundary(0),
-
-    %% start by sending in a couple of segments worth
-    Len = 2*SegmentSize,
-    VQ1 = variable_queue_publish(false, Len, VQ0),
-    %% squeeze and relax queue
-    Churn = Len div 32,
-    VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
-
-    {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
-    VQ7 = lists:foldl(
-            fun (Duration1, VQ4) ->
-                    {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
-                    io:format("~p:~n~p~n",
-                              [Duration1, rabbit_variable_queue:status(VQ5)]),
-                    VQ6 = variable_queue_set_ram_duration_target(
-                            Duration1, VQ5),
-                    publish_fetch_and_ack(Churn, Len, VQ6)
-            end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
-
-    %% drain
-    {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
-    {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
-    {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
-
-    VQ10.
-
-publish_fetch_and_ack(0, _Len, VQ0) ->
-    VQ0;
-publish_fetch_and_ack(N, Len, VQ0) ->
-    VQ1 = variable_queue_publish(false, 1, VQ0),
-    {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
-    Len = rabbit_variable_queue:len(VQ2),
-    {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
-    publish_fetch_and_ack(N-1, Len, VQ3).
-
-test_variable_queue_partial_segments_delta_thing(VQ0) ->
-    SegmentSize = rabbit_queue_index:next_segment_boundary(0),
-    HalfSegment = SegmentSize div 2,
-    OneAndAHalfSegment = SegmentSize + HalfSegment,
-    VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
-    {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
-    VQ3 = check_variable_queue_status(
-            variable_queue_set_ram_duration_target(0, VQ2),
-            %% one segment in q3, and half a segment in delta
-            [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
-             {q3, SegmentSize},
-             {len, SegmentSize + HalfSegment}]),
-    VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
-    VQ5 = check_variable_queue_status(
-            variable_queue_publish(true, 1, VQ4),
-            %% one alpha, but it's in the same segment as the deltas
-            [{q1, 1},
-             {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
-             {q3, SegmentSize},
-             {len, SegmentSize + HalfSegment + 1}]),
-    {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
-                                          SegmentSize + HalfSegment + 1, VQ5),
-    VQ7 = check_variable_queue_status(
-            VQ6,
-            %% the half segment should now be in q3
-            [{q1, 1},
-             {delta, {delta, undefined, 0, undefined}},
-             {q3, HalfSegment},
-             {len, HalfSegment + 1}]),
-    {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
-                                           HalfSegment + 1, VQ7),
-    {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
-    %% should be empty now
-    {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
-    VQ10.
-
-check_variable_queue_status(VQ0, Props) ->
-    VQ1 = variable_queue_wait_for_shuffling_end(VQ0),
-    S = rabbit_variable_queue:status(VQ1),
-    io:format("~p~n", [S]),
-    assert_props(S, Props),
-    VQ1.
-
-variable_queue_wait_for_shuffling_end(VQ) ->
-    case credit_flow:blocked() of
-        false -> VQ;
-        true  -> receive
-                     {bump_credit, Msg} ->
-                         credit_flow:handle_bump_msg(Msg),
-                         variable_queue_wait_for_shuffling_end(
-                           rabbit_variable_queue:resume(VQ))
-                 end
-    end.
-
-test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) ->
-    Count = 2 * rabbit_queue_index:next_segment_boundary(0),
-    VQ1 = variable_queue_publish(true, Count, VQ0),
-    VQ2 = variable_queue_publish(false, Count, VQ1),
-    VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
-    {VQ4, _AckTags}  = variable_queue_fetch(Count, true, false,
-                                            Count + Count, VQ3),
-    {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
-                                            Count, VQ4),
-    _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
-    VQ7 = variable_queue_init(test_amqqueue(true), true),
-    {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
-    Count1 = rabbit_variable_queue:len(VQ8),
-    VQ9 = variable_queue_publish(false, 1, VQ8),
-    VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
-    {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
-    {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
-    VQ12.
-
-test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) ->
-    VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
-    VQ2 = variable_queue_publish(false, 4, VQ1),
-    {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
-    {_Guids, VQ4} =
-        rabbit_variable_queue:requeue(AckTags, VQ3),
-    VQ5 = rabbit_variable_queue:timeout(VQ4),
-    _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
-    VQ7 = variable_queue_init(test_amqqueue(true), true),
-    {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
-    VQ8.
-
-test_variable_queue_fold_msg_on_disk(VQ0) ->
-    VQ1 = variable_queue_publish(true, 1, VQ0),
-    {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
-    {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
-                                              ok, VQ2, AckTags),
-    VQ3.
-
-test_queue_recover() ->
-    Count = 2 * rabbit_queue_index:next_segment_boundary(0),
-    {new, #amqqueue { pid = QPid, name = QName } = Q} =
-        rabbit_amqqueue:declare(test_queue(), true, false, [], none),
-    publish_and_confirm(Q, <<>>, Count),
-
-    exit(QPid, kill),
-    MRef = erlang:monitor(process, QPid),
-    receive {'DOWN', MRef, process, QPid, _Info} -> ok
-    after 10000 -> exit(timeout_waiting_for_queue_death)
-    end,
-    rabbit_amqqueue:stop(),
-    rabbit_amqqueue:start(rabbit_amqqueue:recover()),
-    {ok, Limiter} = rabbit_limiter:start_link(no_id),
-    rabbit_amqqueue:with_or_die(
-      QName,
-      fun (Q1 = #amqqueue { pid = QPid1 }) ->
-              CountMinusOne = Count - 1,
-              {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} =
-                  rabbit_amqqueue:basic_get(Q1, self(), false, Limiter),
-              exit(QPid1, shutdown),
-              VQ1 = variable_queue_init(Q, true),
-              {{_Msg1, true, _AckTag1}, VQ2} =
-                  rabbit_variable_queue:fetch(true, VQ1),
-              CountMinusOne = rabbit_variable_queue:len(VQ2),
-              _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
-              rabbit_amqqueue:internal_delete(QName)
-      end),
-    passed.
-
-test_variable_queue_delete_msg_store_files_callback() ->
-    ok = restart_msg_store_empty(),
-    {new, #amqqueue { pid = QPid, name = QName } = Q} =
-        rabbit_amqqueue:declare(test_queue(), true, false, [], none),
-    Payload = <<0:8388608>>, %% 1MB
-    Count = 30,
-    publish_and_confirm(Q, Payload, Count),
-
-    rabbit_amqqueue:set_ram_duration_target(QPid, 0),
-
-    {ok, Limiter} = rabbit_limiter:start_link(no_id),
-
-    CountMinusOne = Count - 1,
-    {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} =
-        rabbit_amqqueue:basic_get(Q, self(), true, Limiter),
-    {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
-
-    %% give the queue a second to receive the close_fds callback msg
-    timer:sleep(1000),
-
-    rabbit_amqqueue:delete(Q, false, false),
-    passed.
-
-test_configurable_server_properties() ->
-    %% List of the names of the built-in properties we expect to find
-    BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
-                        <<"copyright">>, <<"information">>],
-
-    Protocol = rabbit_framing_amqp_0_9_1,
-
-    %% Verify that the built-in properties are initially present
-    ActualPropNames = [Key || {Key, longstr, _} <-
-                                  rabbit_reader:server_properties(Protocol)],
-    true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
-                     BuiltInPropNames),
-
-    %% Get the initial server properties configured in the environment
-    {ok, ServerProperties} = application:get_env(rabbit, server_properties),
-
-    %% Helper functions
-    ConsProp = fun (X) -> application:set_env(rabbit,
-                                              server_properties,
-                                              [X | ServerProperties]) end,
-    IsPropPresent =
-        fun (X) ->
-                lists:member(X, rabbit_reader:server_properties(Protocol))
-        end,
-
-    %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
-    NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
-    ConsProp(NewSimplifiedProperty),
-    %% Do we find hare soup, appropriately formatted in the generated properties?
-    ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
-                         longstr,
-                         list_to_binary(NewHareVal)},
-    true = IsPropPresent(ExpectedHareImage),
-
-    %% Add a wholly new property of the {BinaryKey, Type, Value} form
-    %% and check for it
-    NewProperty = {<<"new-bin-key">>, signedint, -1},
-    ConsProp(NewProperty),
-    %% Do we find the new property?
-    true = IsPropPresent(NewProperty),
-
-    %% Add a property that clobbers a built-in, and verify correct clobbering
-    {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
-    {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
-                                    list_to_binary(NewVerVal)},
-    ConsProp(NewVersion),
-    ClobberedServerProps = rabbit_reader:server_properties(Protocol),
-    %% Is the clobbering insert present?
-    true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
-    %% Is the clobbering insert the only thing with the clobbering key?
-    [{BinNewVerKey, longstr, BinNewVerVal}] =
-        [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
-
-    application:set_env(rabbit, server_properties, ServerProperties),
-    passed.
-
-nop(_) -> ok.
-nop(_, _) -> ok.
diff --git a/rabbitmq-server/src/rabbit_tests_event_receiver.erl b/rabbitmq-server/src/rabbit_tests_event_receiver.erl
deleted file mode 100644 (file)
index ccbdcbc..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_tests_event_receiver).
-
--export([start/3, stop/0]).
-
--export([init/1, handle_call/2, handle_event/2, handle_info/2,
-         terminate/2, code_change/3]).
-
--include("rabbit.hrl").
-
-start(Pid, Nodes, Types) ->
-    Oks = [ok || _ <- Nodes],
-    {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
-                             [rabbit_event, ?MODULE, [Pid, Types]]).
-
-stop() ->
-    gen_event:delete_handler(rabbit_event, ?MODULE, []).
-
-%%----------------------------------------------------------------------------
-
-init([Pid, Types]) ->
-    {ok, {Pid, Types}}.
-
-handle_call(_Request, State) ->
-    {ok, not_understood, State}.
-
-handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
-    case lists:member(Type, Types) of
-        true  -> Pid ! Event;
-        false -> ok
-    end,
-    {ok, State}.
-
-handle_info(_Info, State) ->
-    {ok, State}.
-
-terminate(_Arg, _State) ->
-    ok.
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-%%----------------------------------------------------------------------------
diff --git a/rabbitmq-server/src/rabbit_trace.erl b/rabbitmq-server/src/rabbit_trace.erl
index aafd81df9cbf3509551235568eaf774f07e478ee..49b16078fc772200799d1c3e771d36b06f79bf21 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_trace).
 
--export([init/1, enabled/1, tap_in/2, tap_out/2, start/1, stop/1]).
+-export([init/1, enabled/1, tap_in/6, tap_out/5, start/1, stop/1]).
 
 -include("rabbit.hrl").
 -include("rabbit_framing.hrl").
 
 -spec(init/1 :: (rabbit_types:vhost()) -> state()).
 -spec(enabled/1 :: (rabbit_types:vhost()) -> boolean()).
--spec(tap_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok').
--spec(tap_out/2 :: (rabbit_amqqueue:qmsg(), state()) -> 'ok').
+-spec(tap_in/6 :: (rabbit_types:basic_message(), [rabbit_amqqueue:name()],
+                   binary(), rabbit_channel:channel_number(),
+                   rabbit_types:username(), state()) -> 'ok').
+-spec(tap_out/5 :: (rabbit_amqqueue:qmsg(), binary(),
+                    rabbit_channel:channel_number(),
+                    rabbit_types:username(), state()) -> 'ok').
 
 -spec(start/1 :: (rabbit_types:vhost()) -> 'ok').
 -spec(stop/1 :: (rabbit_types:vhost()) -> 'ok').
@@ -54,15 +58,29 @@ enabled(VHost) ->
     {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS),
     lists:member(VHost, VHosts).
 
-tap_in(_Msg, none) -> ok;
-tap_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, TraceX) ->
-    trace(TraceX, Msg, <<"publish">>, XName, []).
-
-tap_out(_Msg, none) -> ok;
-tap_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, TraceX) ->
+tap_in(_Msg, _QNames, _ConnName, _ChannelNum, _Username, none) -> ok;
+tap_in(Msg = #basic_message{exchange_name = #resource{name         = XName,
+                                                      virtual_host = VHost}},
+       QNames, ConnName, ChannelNum, Username, TraceX) ->
+    trace(TraceX, Msg, <<"publish">>, XName,
+          [{<<"vhost">>,         longstr,   VHost},
+           {<<"connection">>,    longstr,   ConnName},
+           {<<"channel">>,       signedint, ChannelNum},
+           {<<"user">>,          longstr,   Username},
+           {<<"routed_queues">>, array,
+            [{longstr, QName#resource.name} || QName <- QNames]}]).
+
+tap_out(_Msg, _ConnName, _ChannelNum, _Username, none) -> ok;
+tap_out({#resource{name = QName, virtual_host = VHost},
+         _QPid, _QMsgId, Redelivered, Msg},
+        ConnName, ChannelNum, Username, TraceX) ->
     RedeliveredNum = case Redelivered of true -> 1; false -> 0 end,
     trace(TraceX, Msg, <<"deliver">>, QName,
-          [{<<"redelivered">>, signedint, RedeliveredNum}]).
+          [{<<"redelivered">>, signedint, RedeliveredNum},
+           {<<"vhost">>,       longstr,   VHost},
+           {<<"connection">>,  longstr,   ConnName},
+           {<<"channel">>,     signedint, ChannelNum},
+           {<<"user">>,        longstr,   Username}]).
 
 %%----------------------------------------------------------------------------
 
diff --git a/rabbitmq-server/src/rabbit_types.erl b/rabbitmq-server/src/rabbit_types.erl
index ba48867ad0e41cfadb58d068df1bd97588311f7c..3e2b5ba0c17de17e41dc8aa86e6715089f2744ab 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_types).
@@ -27,7 +27,7 @@
               vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0,
               binding/0, binding_source/0, binding_destination/0,
               amqqueue/0, exchange/0,
-              connection/0, protocol/0, user/0, internal_user/0,
+              connection/0, protocol/0, auth_user/0, user/0, internal_user/0,
               username/0, password/0, password_hash/0,
               ok/1, error/1, ok_or_error/1, ok_or_error2/2, ok_pid_or_error/0,
               channel_exit/0, connection_exit/0, mfargs/0, proc_name/0,
 
 -type(protocol() :: rabbit_framing:protocol()).
 
+-type(auth_user() ::
+        #auth_user{username :: username(),
+                   tags     :: [atom()],
+                   impl     :: any()}).
+
 -type(user() ::
-        #user{username     :: username(),
-              tags         :: [atom()],
-              auth_backend :: atom(),
-              impl         :: any()}).
+        #user{username       :: username(),
+              tags           :: [atom()],
+              authz_backends :: [{atom(), any()}]}).
 
 -type(internal_user() ::
         #internal_user{username      :: username(),
diff --git a/rabbitmq-server/src/rabbit_upgrade.erl b/rabbitmq-server/src/rabbit_upgrade.erl
index 8ab35a89dd0487a016bf4a0ea8f4308dd7e60955..daf39b8acc0b8711fe5b3ad30a00dc0e0c1e658b 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_upgrade).
 
--export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]).
+-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0,
+         nodes_running/1, secondary_upgrade/1]).
 
 -include("rabbit.hrl").
 
@@ -122,6 +123,7 @@ remove_backup() ->
 
 maybe_upgrade_mnesia() ->
     AllNodes = rabbit_mnesia:cluster_nodes(all),
+    ok = rabbit_mnesia_rename:maybe_finish(AllNodes),
     case rabbit_version:upgrades_required(mnesia) of
         {error, starting_from_scratch} ->
             ok;
@@ -190,7 +192,7 @@ die(Msg, Args) ->
     %% We don't throw or exit here since that gets thrown
     %% straight out into do_boot, generating an erl_crash.dump
     %% and displaying any error message in a confusing way.
-    error_logger:error_msg(Msg, Args),
+    rabbit_log:error(Msg, Args),
     Str = rabbit_misc:format(
             "~n~n****~n~n" ++ Msg ++ "~n~n****~n~n~n", Args),
     io:format(Str),
@@ -281,6 +283,4 @@ node_type_legacy() ->
         false -> ram
     end.
 
-%% NB: we cannot use rabbit_log here since it may not have been
-%% started yet
-info(Msg, Args) -> error_logger:info_msg(Msg, Args).
+info(Msg, Args) -> rabbit_log:info(Msg, Args).
diff --git a/rabbitmq-server/src/rabbit_upgrade_functions.erl b/rabbitmq-server/src/rabbit_upgrade_functions.erl
index b6d378525e506a45e56361c08c1053e733ca152f..4eced3f32fa934be79ad556f8d3f2d5f5538a524 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_upgrade_functions).
@@ -48,6 +48,9 @@
 -rabbit_upgrade({queue_decorators,      mnesia, [gm_pids]}).
 -rabbit_upgrade({internal_system_x,     mnesia, [exchange_decorators]}).
 -rabbit_upgrade({cluster_name,          mnesia, [runtime_parameters]}).
+-rabbit_upgrade({down_slave_nodes,      mnesia, [queue_decorators]}).
+-rabbit_upgrade({queue_state,           mnesia, [down_slave_nodes]}).
+-rabbit_upgrade({recoverable_slaves,    mnesia, [queue_state]}).
 
 %% -------------------------------------------------------------------
 
 -spec(policy_apply_to/0       :: () -> 'ok').
 -spec(queue_decorators/0      :: () -> 'ok').
 -spec(internal_system_x/0     :: () -> 'ok').
+-spec(cluster_name/0          :: () -> 'ok').
+-spec(down_slave_nodes/0      :: () -> 'ok').
+-spec(queue_state/0           :: () -> 'ok').
+-spec(recoverable_slaves/0    :: () -> 'ok').
 
 -endif.
 
@@ -382,6 +389,49 @@ cluster_name_tx() ->
     [mnesia:delete(T, K, write) || K <- Ks],
     ok.
 
+down_slave_nodes() ->
+    ok = down_slave_nodes(rabbit_queue),
+    ok = down_slave_nodes(rabbit_durable_queue).
+
+down_slave_nodes(Table) ->
+    transform(
+      Table,
+      fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+            Pid, SlavePids, SyncSlavePids, Policy, GmPids, Decorators}) ->
+              {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+               Pid, SlavePids, SyncSlavePids, [], Policy, GmPids, Decorators}
+      end,
+      [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+       sync_slave_pids, down_slave_nodes, policy, gm_pids, decorators]).
+
+queue_state() ->
+    ok = queue_state(rabbit_queue),
+    ok = queue_state(rabbit_durable_queue).
+
+queue_state(Table) ->
+    transform(
+      Table,
+      fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+            Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators}) ->
+              {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+               Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+               live}
+      end,
+      [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+       sync_slave_pids, down_slave_nodes, policy, gm_pids, decorators, state]).
+
+recoverable_slaves() ->
+    ok = recoverable_slaves(rabbit_queue),
+    ok = recoverable_slaves(rabbit_durable_queue).
+
+recoverable_slaves(Table) ->
+    transform(
+      Table, fun (Q) -> Q end, %% Don't change shape of record
+      [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+       sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators,
+       state]).
+
+
 %%--------------------------------------------------------------------
 
 transform(TableName, Fun, FieldList) ->
diff --git a/rabbitmq-server/src/rabbit_variable_queue.erl b/rabbitmq-server/src/rabbit_variable_queue.erl
index ede697481c3591d7bb6bd3cb8fccac909339a24f..691e4ce2e2191acda3c169dbe94492f7dea1fdac 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_variable_queue).
 
--export([init/3, terminate/2, delete_and_terminate/2, purge/1, purge_acks/1,
-         publish/5, publish_delivered/4, discard/3, drain_confirmed/1,
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+         purge/1, purge_acks/1,
+         publish/6, publish_delivered/5, discard/4, drain_confirmed/1,
          dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
          ackfold/4, fold/3, len/1, is_empty/1, depth/1,
          set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
          handle_pre_hibernate/1, resume/1, msg_rates/1,
-         status/1, invoke/3, is_duplicate/2, multiple_routing_keys/0]).
+         info/2, invoke/3, is_duplicate/2, multiple_routing_keys/0]).
 
 -export([start/1, stop/0]).
 
 %% exported for testing only
--export([start_msg_store/2, stop_msg_store/0, init/5]).
+-export([start_msg_store/2, stop_msg_store/0, init/6]).
 
 %%----------------------------------------------------------------------------
+%% Messages, and their position in the queue, can be in memory or on
+%% disk, or both. Persistent messages will have both message and
+%% position pushed to disk as soon as they arrive; transient messages
+%% can be written to disk (and thus both types can be evicted from
+%% memory) under memory pressure. The question of whether a message is
+%% in RAM and whether it is persistent are orthogonal.
+%%
+%% Messages are persisted using the queue index and the message
+%% store. Normally the queue index holds the position of the message
+%% *within this queue* along with a couple of small bits of metadata,
+%% while the message store holds the message itself (including headers
+%% and other properties).
+%%
+%% However, as an optimisation, small messages can be embedded
+%% directly in the queue index and bypass the message store
+%% altogether.
+%%
 %% Definitions:
-
+%%
 %% alpha: this is a message where both the message itself, and its
 %%        position within the queue are held in RAM
 %%
-%% beta: this is a message where the message itself is only held on
-%%        disk, but its position within the queue is held in RAM.
+%% beta:  this is a message where the message itself is only held on
+%%        disk (if persisted to the message store) but its position
+%%        within the queue is held in RAM.
 %%
 %% gamma: this is a message where the message itself is only held on
 %%        disk, but its position is both in RAM and on disk.
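The embedding optimisation described in the comment block above can be illustrated with a small stand-alone sketch. This is not the module's actual determine_persist_to/2 (referenced later next to ?HEADER_GUESS_SIZE); the module name, function names and the 4096-byte threshold below are assumptions chosen only to show the size-based split between the queue index and the message store.

%% Hypothetical illustration only: choose a persistence target by message size.
-module(persist_to_sketch).
-export([persist_to/1, persist_to/2]).

%% Assumed default embedding threshold in bytes; illustrative, not taken from
%% this commit.
-define(EMBED_BELOW, 4096).

persist_to(MsgSizeBytes) ->
    persist_to(MsgSizeBytes, ?EMBED_BELOW).

persist_to(MsgSizeBytes, EmbedBelow) when MsgSizeBytes < EmbedBelow ->
    queue_index;   %% small enough: embed the whole message in the queue index
persist_to(_MsgSizeBytes, _EmbedBelow) ->
    msg_store.     %% otherwise: index keeps only the position, body goes to the store

%% e.g. persist_to(100) =:= queue_index, persist_to(1 bsl 20) =:= msg_store.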
           q3,
           q4,
           next_seq_id,
-          ram_pending_ack,
-          disk_pending_ack,
+          ram_pending_ack,    %% msgs using store, still in RAM
+          disk_pending_ack,   %% msgs in store, paged out
+          qi_pending_ack,     %% msgs using qi, *can't* be paged out
           index_state,
           msg_store_clients,
           durable,
           transient_threshold,
 
-          len,
-          persistent_count,
+          len,                %% w/o unacked
+          bytes,              %% w/o unacked
+          unacked_bytes,
+          persistent_count,   %% w   unacked
+          persistent_bytes,   %% w   unacked
 
           target_ram_count,
-          ram_msg_count,
+          ram_msg_count,      %% w/o unacked
           ram_msg_count_prev,
           ram_ack_count_prev,
+          ram_bytes,          %% w   unacked
           out_counter,
           in_counter,
           rates,
           unconfirmed,
           confirmed,
           ack_out_counter,
-          ack_in_counter
+          ack_in_counter,
+          %% Unlike the other counters these two do not feed into
+          %% #rates{} and get reset
+          disk_read_count,
+          disk_write_count
         }).
 
 -record(rates, { in, out, ack_in, ack_out, timestamp }).
           msg,
           is_persistent,
           is_delivered,
-          msg_on_disk,
+          msg_in_store,
           index_on_disk,
+          persist_to,
           msg_props
         }).
 
 %% betas, the IO_BATCH_SIZE sets the number of betas that we must be
 %% due to write indices for before we do any work at all.
 -define(IO_BATCH_SIZE, 2048). %% next power-of-2 after ?CREDIT_DISC_BOUND
+-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
 -define(PERSISTENT_MSG_STORE, msg_store_persistent).
 -define(TRANSIENT_MSG_STORE,  msg_store_transient).
 -define(QUEUE, lqueue).
 
 -include("rabbit.hrl").
+-include("rabbit_framing.hrl").
 
 %%----------------------------------------------------------------------------
 
              q3                    :: ?QUEUE:?QUEUE(),
              q4                    :: ?QUEUE:?QUEUE(),
              next_seq_id           :: seq_id(),
-             ram_pending_ack       :: gb_tree(),
-             disk_pending_ack      :: gb_tree(),
+             ram_pending_ack       :: gb_trees:tree(),
+             disk_pending_ack      :: gb_trees:tree(),
+             qi_pending_ack        :: gb_trees:tree(),
              index_state           :: any(),
              msg_store_clients     :: 'undefined' | {{any(), binary()},
                                                     {any(), binary()}},
              transient_threshold   :: non_neg_integer(),
 
              len                   :: non_neg_integer(),
+             bytes                 :: non_neg_integer(),
+             unacked_bytes         :: non_neg_integer(),
+
              persistent_count      :: non_neg_integer(),
+             persistent_bytes      :: non_neg_integer(),
 
              target_ram_count      :: non_neg_integer() | 'infinity',
              ram_msg_count         :: non_neg_integer(),
              ram_msg_count_prev    :: non_neg_integer(),
+             ram_ack_count_prev    :: non_neg_integer(),
+             ram_bytes             :: non_neg_integer(),
              out_counter           :: non_neg_integer(),
              in_counter            :: non_neg_integer(),
              rates                 :: rates(),
-             msgs_on_disk          :: gb_set(),
-             msg_indices_on_disk   :: gb_set(),
-             unconfirmed           :: gb_set(),
-             confirmed             :: gb_set(),
+             msgs_on_disk          :: gb_sets:set(),
+             msg_indices_on_disk   :: gb_sets:set(),
+             unconfirmed           :: gb_sets:set(),
+             confirmed             :: gb_sets:set(),
              ack_out_counter       :: non_neg_integer(),
-             ack_in_counter        :: non_neg_integer() }).
+             ack_in_counter        :: non_neg_integer(),
+             disk_read_count       :: non_neg_integer(),
+             disk_write_count      :: non_neg_integer() }).
 %% Duplicated from rabbit_backing_queue
 -spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}).
 
@@ -415,17 +455,20 @@ stop_msg_store() ->
     ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE),
     ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE).
 
-init(Queue, Recover, AsyncCallback) ->
-    init(Queue, Recover, AsyncCallback,
-         fun (MsgIds, ActionTaken) ->
-                 msgs_written_to_disk(AsyncCallback, MsgIds, ActionTaken)
-         end,
-         fun (MsgIds) -> msg_indices_written_to_disk(AsyncCallback, MsgIds) end).
+init(Queue, Recover, Callback) ->
+    init(
+      Queue, Recover, Callback,
+      fun (MsgIds, ActionTaken) ->
+              msgs_written_to_disk(Callback, MsgIds, ActionTaken)
+      end,
+      fun (MsgIds) -> msg_indices_written_to_disk(Callback, MsgIds) end,
+      fun (MsgIds) -> msgs_and_indices_written_to_disk(Callback, MsgIds) end).
 
 init(#amqqueue { name = QueueName, durable = IsDurable }, new,
-     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) ->
-    IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun),
-    init(IsDurable, IndexState, 0, [],
+     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
+    IndexState = rabbit_queue_index:init(QueueName,
+                                         MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+    init(IsDurable, IndexState, 0, 0, [],
          case IsDurable of
              true  -> msg_store_client_init(?PERSISTENT_MSG_STORE,
                                             MsgOnDiskFun, AsyncCallback);
@@ -433,22 +476,29 @@ init(#amqqueue { name = QueueName, durable = IsDurable }, new,
          end,
          msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback));
 
-init(#amqqueue { name = QueueName, durable = true }, Terms,
-     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) ->
+%% We can be recovering a transient queue if it crashed
+init(#amqqueue { name = QueueName, durable = IsDurable }, Terms,
+     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
     {PRef, RecoveryTerms} = process_recovery_terms(Terms),
-    PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
-                                             MsgOnDiskFun, AsyncCallback),
+    {PersistentClient, ContainsCheckFun} =
+        case IsDurable of
+            true  -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
+                                               MsgOnDiskFun, AsyncCallback),
+                     {C, fun (MsgId) when is_binary(MsgId) ->
+                                 rabbit_msg_store:contains(MsgId, C);
+                             (#basic_message{is_persistent = Persistent}) ->
+                                 Persistent
+                         end};
+            false -> {undefined, fun(_MsgId) -> false end}
+        end,
     TransientClient  = msg_store_client_init(?TRANSIENT_MSG_STORE,
                                              undefined, AsyncCallback),
-    {DeltaCount, IndexState} =
+    {DeltaCount, DeltaBytes, IndexState} =
         rabbit_queue_index:recover(
           QueueName, RecoveryTerms,
           rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE),
-          fun (MsgId) ->
-                  rabbit_msg_store:contains(MsgId, PersistentClient)
-          end,
-          MsgIdxOnDiskFun),
-    init(true, IndexState, DeltaCount, RecoveryTerms,
+          ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+    init(IsDurable, IndexState, DeltaCount, DeltaBytes, RecoveryTerms,
          PersistentClient, TransientClient).
 
 process_recovery_terms(Terms=non_clean_shutdown) ->
@@ -461,6 +511,7 @@ process_recovery_terms(Terms) ->
 
 terminate(_Reason, State) ->
     State1 = #vqstate { persistent_count  = PCount,
+                        persistent_bytes  = PBytes,
                         index_state       = IndexState,
                         msg_store_clients = {MSCStateP, MSCStateT} } =
         purge_pending_ack(true, State),
@@ -470,7 +521,9 @@ terminate(_Reason, State) ->
                             rabbit_msg_store:client_ref(MSCStateP)
            end,
     ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
-    Terms = [{persistent_ref, PRef}, {persistent_count, PCount}],
+    Terms = [{persistent_ref,   PRef},
+             {persistent_count, PCount},
+             {persistent_bytes, PBytes}],
     a(State1 #vqstate { index_state       = rabbit_queue_index:terminate(
                                               Terms, IndexState),
                         msg_store_clients = undefined }).
@@ -494,44 +547,33 @@ delete_and_terminate(_Reason, State) ->
     a(State2 #vqstate { index_state       = IndexState1,
                         msg_store_clients = undefined }).
 
-purge(State = #vqstate { q4                = Q4,
-                         index_state       = IndexState,
-                         msg_store_clients = MSCState,
-                         len               = Len,
-                         persistent_count  = PCount }) ->
+delete_crashed(#amqqueue{name = QName}) ->
+    ok = rabbit_queue_index:erase(QName).
+
+purge(State = #vqstate { q4  = Q4,
+                         len = Len }) ->
     %% TODO: when there are no pending acks, which is a common case,
     %% we could simply wipe the qi instead of issuing delivers and
     %% acks for all the messages.
-    {LensByStore, IndexState1} = remove_queue_entries(
-                                   fun ?QUEUE:foldl/3, Q4,
-                                   orddict:new(), IndexState, MSCState),
-    {LensByStore1, State1 = #vqstate { q1                = Q1,
-                                       index_state       = IndexState2,
-                                       msg_store_clients = MSCState1 }} =
-        purge_betas_and_deltas(LensByStore,
-                               State #vqstate { q4          = ?QUEUE:new(),
-                                                index_state = IndexState1 }),
-    {LensByStore2, IndexState3} = remove_queue_entries(
-                                    fun ?QUEUE:foldl/3, Q1,
-                                    LensByStore1, IndexState2, MSCState1),
-    PCount1 = PCount - find_persistent_count(LensByStore2),
-    {Len, a(State1 #vqstate { q1                = ?QUEUE:new(),
-                              index_state       = IndexState3,
-                              len               = 0,
-                              ram_msg_count     = 0,
-                              persistent_count  = PCount1 })}.
+    State1 = remove_queue_entries(Q4, State),
+
+    State2 = #vqstate { q1 = Q1 } =
+        purge_betas_and_deltas(State1 #vqstate { q4 = ?QUEUE:new() }),
+
+    State3 = remove_queue_entries(Q1, State2),
+
+    {Len, a(State3 #vqstate { q1 = ?QUEUE:new() })}.
 
 purge_acks(State) -> a(purge_pending_ack(false, State)).
 
 publish(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
         MsgProps = #message_properties { needs_confirming = NeedsConfirming },
-        IsDelivered, _ChPid, State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
-                                                next_seq_id      = SeqId,
-                                                len              = Len,
-                                                in_counter       = InCount,
-                                                persistent_count = PCount,
-                                                durable          = IsDurable,
-                                                unconfirmed      = UC }) ->
+        IsDelivered, _ChPid, _Flow,
+        State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
+                           next_seq_id      = SeqId,
+                           in_counter       = InCount,
+                           durable          = IsDurable,
+                           unconfirmed      = UC }) ->
     IsPersistent1 = IsDurable andalso IsPersistent,
     MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps),
     {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
@@ -540,39 +582,36 @@ publish(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
                  true  -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
              end,
     InCount1 = InCount + 1,
-    PCount1  = PCount  + one_if(IsPersistent1),
     UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
-    State3 = inc_ram_msg_count(State2 #vqstate { next_seq_id      = SeqId + 1,
-                                                 len              = Len   + 1,
-                                                 in_counter       = InCount1,
-                                                 persistent_count = PCount1,
-                                                 unconfirmed      = UC1 }),
+    State3 = stats({1, 0}, {none, MsgStatus1},
+                   State2#vqstate{ next_seq_id = SeqId + 1,
+                                   in_counter  = InCount1,
+                                   unconfirmed = UC1 }),
     a(reduce_memory_use(maybe_update_rates(State3))).
 
 publish_delivered(Msg = #basic_message { is_persistent = IsPersistent,
                                          id = MsgId },
                   MsgProps = #message_properties {
                     needs_confirming = NeedsConfirming },
-                  _ChPid, State = #vqstate { next_seq_id      = SeqId,
-                                             out_counter      = OutCount,
-                                             in_counter       = InCount,
-                                             persistent_count = PCount,
-                                             durable          = IsDurable,
-                                             unconfirmed      = UC }) ->
+                  _ChPid, _Flow,
+                  State = #vqstate { next_seq_id      = SeqId,
+                                     out_counter      = OutCount,
+                                     in_counter       = InCount,
+                                     durable          = IsDurable,
+                                     unconfirmed      = UC }) ->
     IsPersistent1 = IsDurable andalso IsPersistent,
     MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps),
     {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
     State2 = record_pending_ack(m(MsgStatus1), State1),
-    PCount1 = PCount + one_if(IsPersistent1),
     UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
-    State3 = State2 #vqstate { next_seq_id      = SeqId    + 1,
-                               out_counter      = OutCount + 1,
-                               in_counter       = InCount  + 1,
-                               persistent_count = PCount1,
-                               unconfirmed      = UC1 },
+    State3 = stats({0, 1}, {none, MsgStatus1},
+                   State2 #vqstate { next_seq_id      = SeqId    + 1,
+                                     out_counter      = OutCount + 1,
+                                     in_counter       = InCount  + 1,
+                                     unconfirmed      = UC1 }),
     {SeqId, a(reduce_memory_use(maybe_update_rates(State3)))}.
 
-discard(_MsgId, _ChPid, State) -> State.
+discard(_MsgId, _ChPid, _Flow, State) -> State.
 
 drain_confirmed(State = #vqstate { confirmed = C }) ->
     case gb_sets:is_empty(C) of
@@ -634,45 +673,38 @@ ack([], State) ->
 ack([SeqId], State) ->
     {#msg_status { msg_id        = MsgId,
                    is_persistent = IsPersistent,
-                   msg_on_disk   = MsgOnDisk,
+                   msg_in_store  = MsgInStore,
                    index_on_disk = IndexOnDisk },
      State1 = #vqstate { index_state       = IndexState,
                          msg_store_clients = MSCState,
-                         persistent_count  = PCount,
                          ack_out_counter   = AckOutCount }} =
-        remove_pending_ack(SeqId, State),
+        remove_pending_ack(true, SeqId, State),
     IndexState1 = case IndexOnDisk of
                       true  -> rabbit_queue_index:ack([SeqId], IndexState);
                       false -> IndexState
                   end,
-    case MsgOnDisk of
+    case MsgInStore of
         true  -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
         false -> ok
     end,
-    PCount1 = PCount - one_if(IsPersistent),
     {[MsgId],
      a(State1 #vqstate { index_state      = IndexState1,
-                         persistent_count = PCount1,
                          ack_out_counter  = AckOutCount + 1 })};
 ack(AckTags, State) ->
     {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds},
      State1 = #vqstate { index_state       = IndexState,
                          msg_store_clients = MSCState,
-                         persistent_count  = PCount,
                          ack_out_counter   = AckOutCount }} =
         lists:foldl(
           fun (SeqId, {Acc, State2}) ->
-                  {MsgStatus, State3} = remove_pending_ack(SeqId, State2),
+                  {MsgStatus, State3} = remove_pending_ack(true, SeqId, State2),
                   {accumulate_ack(MsgStatus, Acc), State3}
           end, {accumulate_ack_init(), State}, AckTags),
     IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
     [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
      || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)],
-    PCount1 = PCount - find_persistent_count(sum_msg_ids_by_store_to_len(
-                                               orddict:new(), MsgIdsByStore)),
     {lists:reverse(AllMsgIds),
      a(State1 #vqstate { index_state      = IndexState1,
-                         persistent_count = PCount1,
                          ack_out_counter  = AckOutCount + length(AckTags) })}.
 
 requeue(AckTags, #vqstate { delta      = Delta,
@@ -710,15 +742,18 @@ fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
     {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
                                      [msg_iterator(State),
                                       disk_ack_iterator(State),
-                                      ram_ack_iterator(State)]),
+                                      ram_ack_iterator(State),
+                                      qi_ack_iterator(State)]),
     ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).
 
 len(#vqstate { len = Len }) -> Len.
 
 is_empty(State) -> 0 == len(State).
 
-depth(State = #vqstate { ram_pending_ack = RPA, disk_pending_ack = DPA }) ->
-    len(State) + gb_trees:size(RPA) + gb_trees:size(DPA).
+depth(State = #vqstate { ram_pending_ack  = RPA,
+                         disk_pending_ack = DPA,
+                         qi_pending_ack   = QPA }) ->
+    len(State) + gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA).
 
 set_ram_duration_target(
   DurationTarget, State = #vqstate {
@@ -784,10 +819,11 @@ ram_duration(State) ->
                         ram_msg_count      = RamMsgCount,
                         ram_msg_count_prev = RamMsgCountPrev,
                         ram_pending_ack    = RPA,
+                        qi_pending_ack     = QPA,
                         ram_ack_count_prev = RamAckCountPrev } =
         update_rates(State),
 
-    RamAckCount = gb_trees:size(RPA),
+    RamAckCount = gb_trees:size(RPA) + gb_trees:size(QPA),
 
     Duration = %% msgs+acks / (msgs+acks/sec) == sec
         case lists:all(fun (X) -> X < 0.01 end,
@@ -821,15 +857,35 @@ msg_rates(#vqstate { rates = #rates { in  = AvgIngressRate,
                                       out = AvgEgressRate } }) ->
     {AvgIngressRate, AvgEgressRate}.
 
-status(#vqstate {
+info(messages_ready_ram, #vqstate{ram_msg_count = RamMsgCount}) ->
+    RamMsgCount;
+info(messages_unacknowledged_ram, #vqstate{ram_pending_ack = RPA,
+                                           qi_pending_ack  = QPA}) ->
+    gb_trees:size(RPA) + gb_trees:size(QPA);
+info(messages_ram, State) ->
+    info(messages_ready_ram, State) + info(messages_unacknowledged_ram, State);
+info(messages_persistent, #vqstate{persistent_count = PersistentCount}) ->
+    PersistentCount;
+info(message_bytes, #vqstate{bytes         = Bytes,
+                             unacked_bytes = UBytes}) ->
+    Bytes + UBytes;
+info(message_bytes_ready, #vqstate{bytes = Bytes}) ->
+    Bytes;
+info(message_bytes_unacknowledged, #vqstate{unacked_bytes = UBytes}) ->
+    UBytes;
+info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) ->
+    RamBytes;
+info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) ->
+    PersistentBytes;
+info(disk_reads, #vqstate{disk_read_count = Count}) ->
+    Count;
+info(disk_writes, #vqstate{disk_write_count = Count}) ->
+    Count;
+info(backing_queue_status, #vqstate {
           q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
           len              = Len,
-          ram_pending_ack  = RPA,
-          disk_pending_ack = DPA,
           target_ram_count = TargetRamCount,
-          ram_msg_count    = RamMsgCount,
           next_seq_id      = NextSeqId,
-          persistent_count = PersistentCount,
           rates            = #rates { in      = AvgIngressRate,
                                       out     = AvgEgressRate,
                                       ack_in  = AvgAckIngressRate,
@@ -841,16 +897,14 @@ status(#vqstate {
       {q3                  , ?QUEUE:len(Q3)},
       {q4                  , ?QUEUE:len(Q4)},
       {len                 , Len},
-      {pending_acks        , gb_trees:size(RPA) + gb_trees:size(DPA)},
       {target_ram_count    , TargetRamCount},
-      {ram_msg_count       , RamMsgCount},
-      {ram_ack_count       , gb_trees:size(RPA)},
       {next_seq_id         , NextSeqId},
-      {persistent_count    , PersistentCount},
       {avg_ingress_rate    , AvgIngressRate},
       {avg_egress_rate     , AvgEgressRate},
       {avg_ack_ingress_rate, AvgAckIngressRate},
-      {avg_ack_egress_rate , AvgAckEgressRate} ].
+      {avg_ack_egress_rate , AvgAckEgressRate} ];
+info(Item, _) ->
+    throw({bad_argument, Item}).
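Note (not part of the patch): the new info/2 clauses above expose the byte and disk counters this change starts tracking (message_bytes, message_bytes_ram, disk_reads, disk_writes, ...), presumably so they can be reported as per-queue info items alongside the existing backing_queue_status. A minimal sketch of how a caller holding the queue state could collect them; it is written as a hypothetical local helper in this module, with State assumed to be the #vqstate{} threaded through the backing-queue callbacks:

    %% Sketch only: gather a few of the newly exposed counters.
    info_summary(State) ->
        [{Item, info(Item, State)}
         || Item <- [messages_ram, messages_persistent, message_bytes,
                     message_bytes_ram, disk_reads, disk_writes]].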
 
 invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
 invoke(      _,   _, State) -> State.
@@ -863,8 +917,12 @@ is_duplicate(_Msg, State) -> {false, State}.
 
 a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
                      len              = Len,
+                     bytes            = Bytes,
+                     unacked_bytes    = UnackedBytes,
                      persistent_count = PersistentCount,
-                     ram_msg_count    = RamMsgCount }) ->
+                     persistent_bytes = PersistentBytes,
+                     ram_msg_count    = RamMsgCount,
+                     ram_bytes        = RamBytes}) ->
     E1 = ?QUEUE:is_empty(Q1),
     E2 = ?QUEUE:is_empty(Q2),
     ED = Delta#delta.count == 0,
@@ -878,9 +936,14 @@ a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
     true = LZ == (E3 and E4),
 
     true = Len             >= 0,
+    true = Bytes           >= 0,
+    true = UnackedBytes    >= 0,
     true = PersistentCount >= 0,
+    true = PersistentBytes >= 0,
     true = RamMsgCount     >= 0,
     true = RamMsgCount     =< Len,
+    true = RamBytes        >= 0,
+    true = RamBytes        =< Bytes + UnackedBytes,
 
     State.
 
@@ -888,14 +951,11 @@ d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
   when Start + Count =< End ->
     Delta.
 
-m(MsgStatus = #msg_status { msg           = Msg,
-                            is_persistent = IsPersistent,
-                            msg_on_disk   = MsgOnDisk,
+m(MsgStatus = #msg_status { is_persistent = IsPersistent,
+                            msg_in_store  = MsgInStore,
                             index_on_disk = IndexOnDisk }) ->
     true = (not IsPersistent) or IndexOnDisk,
-    true = (not IndexOnDisk) or MsgOnDisk,
-    true = (Msg =/= undefined) or MsgOnDisk,
-
+    true = msg_in_ram(MsgStatus) or MsgInStore,
     MsgStatus.
 
 one_if(true ) -> 1;
@@ -914,21 +974,39 @@ msg_status(IsPersistent, IsDelivered, SeqId,
                 msg           = Msg,
                 is_persistent = IsPersistent,
                 is_delivered  = IsDelivered,
-                msg_on_disk   = false,
+                msg_in_store  = false,
                 index_on_disk = false,
+                persist_to    = determine_persist_to(Msg, MsgProps),
                 msg_props     = MsgProps}.
 
+beta_msg_status({Msg = #basic_message{id = MsgId},
+                 SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+    MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+    MS0#msg_status{msg_id       = MsgId,
+                   msg          = Msg,
+                   persist_to   = queue_index,
+                   msg_in_store = false};
+
 beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+    MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+    MS0#msg_status{msg_id       = MsgId,
+                   msg          = undefined,
+                   persist_to   = msg_store,
+                   msg_in_store = true}.
+
+beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered) ->
   #msg_status{seq_id        = SeqId,
-              msg_id        = MsgId,
               msg           = undefined,
               is_persistent = IsPersistent,
               is_delivered  = IsDelivered,
-              msg_on_disk   = true,
               index_on_disk = true,
               msg_props     = MsgProps}.
 
-trim_msg_status(MsgStatus) -> MsgStatus #msg_status { msg = undefined }.
+trim_msg_status(MsgStatus) ->
+    case persist_to(MsgStatus) of
+        msg_store   -> MsgStatus#msg_status{msg = undefined};
+        queue_index -> MsgStatus
+    end.
 
 with_msg_store_state({MSCStateP, MSCStateT},  true, Fun) ->
     {Result, MSCStateP1} = Fun(MSCStateP),
@@ -990,26 +1068,36 @@ maybe_write_delivered(false, _SeqId, IndexState) ->
 maybe_write_delivered(true, SeqId, IndexState) ->
     rabbit_queue_index:deliver([SeqId], IndexState).
 
-betas_from_index_entries(List, TransientThreshold, RPA, DPA, IndexState) ->
-    {Filtered, Delivers, Acks} =
+betas_from_index_entries(List, TransientThreshold, RPA, DPA, QPA, IndexState) ->
+    {Filtered, Delivers, Acks, RamReadyCount, RamBytes} =
         lists:foldr(
-          fun ({_MsgId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
-               {Filtered1, Delivers1, Acks1} = Acc) ->
+          fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
+               {Filtered1, Delivers1, Acks1, RRC, RB} = Acc) ->
                   case SeqId < TransientThreshold andalso not IsPersistent of
                       true  -> {Filtered1,
                                 cons_if(not IsDelivered, SeqId, Delivers1),
-                                [SeqId | Acks1]};
-                      false -> case (gb_trees:is_defined(SeqId, RPA) orelse
-                                     gb_trees:is_defined(SeqId, DPA)) of
-                                   false -> {?QUEUE:in_r(m(beta_msg_status(M)),
-                                                         Filtered1),
-                                             Delivers1, Acks1};
-                                   true  -> Acc
-                           end
+                                [SeqId | Acks1], RRC, RB};
+                      false -> MsgStatus = m(beta_msg_status(M)),
+                               HaveMsg = msg_in_ram(MsgStatus),
+                               Size = msg_size(MsgStatus),
+                               case (gb_trees:is_defined(SeqId, RPA) orelse
+                                     gb_trees:is_defined(SeqId, DPA) orelse
+                                     gb_trees:is_defined(SeqId, QPA)) of
+                                   false -> {?QUEUE:in_r(MsgStatus, Filtered1),
+                                             Delivers1, Acks1,
+                                             RRC + one_if(HaveMsg),
+                                             RB + one_if(HaveMsg) * Size};
+                                   true  -> Acc %% [0]
+                               end
                   end
-          end, {?QUEUE:new(), [], []}, List),
-    {Filtered, rabbit_queue_index:ack(
-                 Acks, rabbit_queue_index:deliver(Delivers, IndexState))}.
+          end, {?QUEUE:new(), [], [], 0, 0}, List),
+    {Filtered, RamReadyCount, RamBytes,
+     rabbit_queue_index:ack(
+       Acks, rabbit_queue_index:deliver(Delivers, IndexState))}.
+%% [0] We don't increase RamBytes here, even though it pertains to
+%% unacked messages too, since if HaveMsg then the message must have
+%% been stored in the QI, thus the message must have been in
+%% qi_pending_ack, thus it must already have been in RAM.
 
 expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) ->
     d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 });
@@ -1028,15 +1116,17 @@ expand_delta(_SeqId, #delta { count       = Count } = Delta) ->
 %% Internal major helpers for Public API
 %%----------------------------------------------------------------------------
 
-init(IsDurable, IndexState, DeltaCount, Terms,
+init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms,
      PersistentClient, TransientClient) ->
     {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState),
 
-    DeltaCount1 =
+    {DeltaCount1, DeltaBytes1} =
         case Terms of
-            non_clean_shutdown -> DeltaCount;
-            _                  -> proplists:get_value(persistent_count,
-                                                      Terms, DeltaCount)
+            non_clean_shutdown -> {DeltaCount, DeltaBytes};
+            _                  -> {proplists:get_value(persistent_count,
+                                                       Terms, DeltaCount),
+                                   proplists:get_value(persistent_bytes,
+                                                       Terms, DeltaBytes)}
         end,
     Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
                 true  -> ?BLANK_DELTA;
@@ -1054,6 +1144,7 @@ init(IsDurable, IndexState, DeltaCount, Terms,
       next_seq_id         = NextSeqId,
       ram_pending_ack     = gb_trees:empty(),
       disk_pending_ack    = gb_trees:empty(),
+      qi_pending_ack      = gb_trees:empty(),
       index_state         = IndexState1,
       msg_store_clients   = {PersistentClient, TransientClient},
       durable             = IsDurable,
@@ -1061,11 +1152,15 @@ init(IsDurable, IndexState, DeltaCount, Terms,
 
       len                 = DeltaCount1,
       persistent_count    = DeltaCount1,
+      bytes               = DeltaBytes1,
+      persistent_bytes    = DeltaBytes1,
 
       target_ram_count    = infinity,
       ram_msg_count       = 0,
       ram_msg_count_prev  = 0,
       ram_ack_count_prev  = 0,
+      ram_bytes           = 0,
+      unacked_bytes       = 0,
       out_counter         = 0,
       in_counter          = 0,
       rates               = blank_rates(Now),
@@ -1074,7 +1169,9 @@ init(IsDurable, IndexState, DeltaCount, Terms,
       unconfirmed         = gb_sets:new(),
       confirmed           = gb_sets:new(),
       ack_out_counter     = 0,
-      ack_in_counter      = 0 },
+      ack_in_counter      = 0,
+      disk_read_count     = 0,
+      disk_write_count    = 0 },
     a(maybe_deltas_to_betas(State)).
 
 blank_rates(Now) ->
@@ -1090,9 +1187,9 @@ in_r(MsgStatus = #msg_status { msg = undefined },
         true  -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
         false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
                      read_msg(MsgStatus, State),
-                 inc_ram_msg_count(
-                   State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus#msg_status {
-                                                        msg = Msg }, Q4a) })
+                 MsgStatus1 = MsgStatus#msg_status{msg = Msg},
+                 stats(ready0, {MsgStatus, MsgStatus1},
+                       State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus1, Q4a) })
     end;
 in_r(MsgStatus, State = #vqstate { q4 = Q4 }) ->
     State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) }.
@@ -1115,28 +1212,72 @@ read_msg(#msg_status{msg           = undefined,
 read_msg(#msg_status{msg = Msg}, State) ->
     {Msg, State}.
 
-read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState}) ->
+read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState,
+                                               disk_read_count   = Count}) ->
     {{ok, Msg = #basic_message {}}, MSCState1} =
         msg_store_read(MSCState, IsPersistent, MsgId),
-    {Msg, State #vqstate {msg_store_clients = MSCState1}}.
-
-inc_ram_msg_count(State = #vqstate{ram_msg_count = RamMsgCount}) ->
-    State#vqstate{ram_msg_count = RamMsgCount + 1}.
+    {Msg, State #vqstate {msg_store_clients = MSCState1,
+                          disk_read_count   = Count + 1}}.
+
+stats(Signs, Statuses, State) ->
+    stats0(expand_signs(Signs), expand_statuses(Statuses), State).
+
+expand_signs(ready0)   -> {0, 0, true};
+expand_signs({A, B})   -> {A, B, false}.
+
+expand_statuses({none, A})    -> {false,         msg_in_ram(A), A};
+expand_statuses({B,    none}) -> {msg_in_ram(B), false,         B};
+expand_statuses({B,    A})    -> {msg_in_ram(B), msg_in_ram(A), B}.
+
+%% In this function at least, we are religious: the variable name
+%% contains "Ready" or "Unacked" iff that is what it counts. If
+%% neither is present it counts both.
+stats0({DeltaReady, DeltaUnacked, ReadyMsgPaged},
+       {InRamBefore, InRamAfter, MsgStatus},
+       State = #vqstate{len              = ReadyCount,
+                        bytes            = ReadyBytes,
+                        ram_msg_count    = RamReadyCount,
+                        persistent_count = PersistentCount,
+                        unacked_bytes    = UnackedBytes,
+                        ram_bytes        = RamBytes,
+                        persistent_bytes = PersistentBytes}) ->
+    S = msg_size(MsgStatus),
+    DeltaTotal = DeltaReady + DeltaUnacked,
+    DeltaRam = case {InRamBefore, InRamAfter} of
+                   {false, false} ->  0;
+                   {false, true}  ->  1;
+                   {true,  false} -> -1;
+                   {true,  true}  ->  0
+               end,
+    DeltaRamReady = case DeltaReady of
+                        1                    -> one_if(InRamAfter);
+                        -1                   -> -one_if(InRamBefore);
+                        0 when ReadyMsgPaged -> DeltaRam;
+                        0                    -> 0
+                    end,
+    DeltaPersistent = DeltaTotal * one_if(MsgStatus#msg_status.is_persistent),
+    State#vqstate{len               = ReadyCount      + DeltaReady,
+                  ram_msg_count     = RamReadyCount   + DeltaRamReady,
+                  persistent_count  = PersistentCount + DeltaPersistent,
+                  bytes             = ReadyBytes      + DeltaReady       * S,
+                  unacked_bytes     = UnackedBytes    + DeltaUnacked     * S,
+                  ram_bytes         = RamBytes        + DeltaRam         * S,
+                  persistent_bytes  = PersistentBytes + DeltaPersistent  * S}.
+
+msg_size(#msg_status{msg_props = #message_properties{size = Size}}) -> Size.
+
+msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined.
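Note (not part of the patch): stats/3 above becomes the single place where the ready/unacked counts and the various byte counters are adjusted. Callers pass a {DeltaReady, DeltaUnacked} sign pair plus the before/after #msg_status{} (or none); the atom ready0 marks a ready message whose in-RAM status changed without its counts changing. As used elsewhere in this diff: {1, 0} is a publish, {0, 1} a publish_delivered, {-1, 0} a removal with no ack required, {-1, 1} a fetch that leaves a pending ack, {0, -1} an ack, and {1, -1} a requeue. A reduced, hypothetical toy module mirroring just the count part of that convention, assuming only the ready/unacked counts are tracked:

    -module(vq_stats_sketch).
    -export([new/0, apply_signs/2]).

    -record(counts, {ready = 0, unacked = 0}).

    new() -> #counts{}.

    %% e.g. apply_signs({-1, 1}, Counts) models a fetch with ack required.
    apply_signs({DReady, DUnacked}, C = #counts{ready = R, unacked = U}) ->
        C#counts{ready = R + DReady, unacked = U + DUnacked}.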
 
 remove(AckRequired, MsgStatus = #msg_status {
                       seq_id        = SeqId,
                       msg_id        = MsgId,
-                      msg           = Msg,
                       is_persistent = IsPersistent,
                       is_delivered  = IsDelivered,
-                      msg_on_disk   = MsgOnDisk,
+                      msg_in_store  = MsgInStore,
                       index_on_disk = IndexOnDisk },
-       State = #vqstate {ram_msg_count     = RamMsgCount,
-                         out_counter       = OutCount,
+       State = #vqstate {out_counter       = OutCount,
                          index_state       = IndexState,
-                         msg_store_clients = MSCState,
-                         len               = Len,
-                         persistent_count  = PCount}) ->
+                         msg_store_clients = MSCState}) ->
     %% 1. Mark it delivered if necessary
     IndexState1 = maybe_write_delivered(
                     IndexOnDisk andalso not IsDelivered,
@@ -1147,10 +1288,11 @@ remove(AckRequired, MsgStatus = #msg_status {
                   ok = msg_store_remove(MSCState, IsPersistent, [MsgId])
           end,
     Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end,
-    IndexState2 = case {AckRequired, MsgOnDisk, IndexOnDisk} of
-                      {false, true, false} -> Rem(), IndexState1;
-                      {false, true,  true} -> Rem(), Ack();
-                      _                    -> IndexState1
+    IndexState2 = case {AckRequired, MsgInStore, IndexOnDisk} of
+                      {false, true,  false} -> Rem(), IndexState1;
+                      {false, true,   true} -> Rem(), Ack();
+                      {false, false,  true} -> Ack();
+                      _                     -> IndexState1
                   end,
 
     %% 3. If an ack is required, add something sensible to PA
@@ -1161,153 +1303,215 @@ remove(AckRequired, MsgStatus = #msg_status {
                                     {SeqId, StateN};
                            false -> {undefined, State}
                        end,
-
-    PCount1      = PCount      - one_if(IsPersistent andalso not AckRequired),
-    RamMsgCount1 = RamMsgCount - one_if(Msg =/= undefined),
-
+    State2       = case AckRequired of
+                       false -> stats({-1, 0}, {MsgStatus, none},     State1);
+                       true  -> stats({-1, 1}, {MsgStatus, MsgStatus}, State1)
+                   end,
     {AckTag, maybe_update_rates(
-               State1 #vqstate {ram_msg_count    = RamMsgCount1,
-                                out_counter      = OutCount + 1,
-                                index_state      = IndexState2,
-                                len              = Len - 1,
-                                persistent_count = PCount1})}.
-
-purge_betas_and_deltas(LensByStore,
-                       State = #vqstate { q3                = Q3,
-                                          index_state       = IndexState,
-                                          msg_store_clients = MSCState }) ->
+               State2 #vqstate {out_counter = OutCount + 1,
+                                index_state = IndexState2})}.
+
+purge_betas_and_deltas(State = #vqstate { q3 = Q3 }) ->
     case ?QUEUE:is_empty(Q3) of
-        true  -> {LensByStore, State};
-        false -> {LensByStore1, IndexState1} =
-                     remove_queue_entries(fun ?QUEUE:foldl/3, Q3,
-                                          LensByStore, IndexState, MSCState),
-                 purge_betas_and_deltas(LensByStore1,
-                                        maybe_deltas_to_betas(
-                                          State #vqstate {
-                                            q3          = ?QUEUE:new(),
-                                            index_state = IndexState1 }))
+        true  -> State;
+        false -> State1 = remove_queue_entries(Q3, State),
+                 purge_betas_and_deltas(maybe_deltas_to_betas(
+                                          State1#vqstate{q3 = ?QUEUE:new()}))
     end.
 
-remove_queue_entries(Fold, Q, LensByStore, IndexState, MSCState) ->
-    {MsgIdsByStore, Delivers, Acks} =
-        Fold(fun remove_queue_entries1/2, {orddict:new(), [], []}, Q),
+remove_queue_entries(Q, State = #vqstate{index_state       = IndexState,
+                                         msg_store_clients = MSCState}) ->
+    {MsgIdsByStore, Delivers, Acks, State1} =
+        ?QUEUE:foldl(fun remove_queue_entries1/2,
+                     {orddict:new(), [], [], State}, Q),
     ok = orddict:fold(fun (IsPersistent, MsgIds, ok) ->
                               msg_store_remove(MSCState, IsPersistent, MsgIds)
                       end, ok, MsgIdsByStore),
-    {sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore),
-     rabbit_queue_index:ack(Acks,
-                            rabbit_queue_index:deliver(Delivers, IndexState))}.
+    IndexState1 = rabbit_queue_index:ack(
+                    Acks, rabbit_queue_index:deliver(Delivers, IndexState)),
+    State1#vqstate{index_state = IndexState1}.
 
 remove_queue_entries1(
-  #msg_status { msg_id = MsgId, seq_id = SeqId,
-                is_delivered = IsDelivered, msg_on_disk = MsgOnDisk,
-                index_on_disk = IndexOnDisk, is_persistent = IsPersistent },
-  {MsgIdsByStore, Delivers, Acks}) ->
-    {case MsgOnDisk of
+  #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered,
+                msg_in_store = MsgInStore, index_on_disk = IndexOnDisk,
+                is_persistent = IsPersistent} = MsgStatus,
+  {MsgIdsByStore, Delivers, Acks, State}) ->
+    {case MsgInStore of
          true  -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
          false -> MsgIdsByStore
      end,
      cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
-     cons_if(IndexOnDisk, SeqId, Acks)}.
-
-sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore) ->
-    orddict:fold(
-      fun (IsPersistent, MsgIds, LensByStore1) ->
-              orddict:update_counter(IsPersistent, length(MsgIds), LensByStore1)
-      end, LensByStore, MsgIdsByStore).
+     cons_if(IndexOnDisk, SeqId, Acks),
+     stats({-1, 0}, {MsgStatus, none}, State)}.
 
 %%----------------------------------------------------------------------------
 %% Internal gubbins for publishing
 %%----------------------------------------------------------------------------
 
 maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
-                                  msg_on_disk = true }, _MSCState) ->
-    MsgStatus;
+                                  msg_in_store = true }, State) ->
+    {MsgStatus, State};
 maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
                                  msg = Msg, msg_id = MsgId,
-                                 is_persistent = IsPersistent }, MSCState)
+                                 is_persistent = IsPersistent },
+                        State = #vqstate{ msg_store_clients = MSCState,
+                                          disk_write_count  = Count})
   when Force orelse IsPersistent ->
-    Msg1 = Msg #basic_message {
-             %% don't persist any recoverable decoded properties
-             content = rabbit_binary_parser:clear_decoded_content(
-                         Msg #basic_message.content)},
-    ok = msg_store_write(MSCState, IsPersistent, MsgId, Msg1),
-    MsgStatus #msg_status { msg_on_disk = true };
-maybe_write_msg_to_disk(_Force, MsgStatus, _MSCState) ->
-    MsgStatus.
+    case persist_to(MsgStatus) of
+        msg_store   -> ok = msg_store_write(MSCState, IsPersistent, MsgId,
+                                            prepare_to_store(Msg)),
+                       {MsgStatus#msg_status{msg_in_store = true},
+                        State#vqstate{disk_write_count = Count + 1}};
+        queue_index -> {MsgStatus, State}
+    end;
+maybe_write_msg_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
 
 maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
-                                    index_on_disk = true }, IndexState) ->
-    true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION
-    {MsgStatus, IndexState};
+                                    index_on_disk = true }, State) ->
+    {MsgStatus, State};
 maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
+                                   msg           = Msg,
                                    msg_id        = MsgId,
                                    seq_id        = SeqId,
                                    is_persistent = IsPersistent,
                                    is_delivered  = IsDelivered,
-                                   msg_props     = MsgProps}, IndexState)
+                                   msg_props     = MsgProps},
+                          State = #vqstate{target_ram_count = TargetRamCount,
+                                           disk_write_count = DiskWriteCount,
+                                           index_state      = IndexState})
   when Force orelse IsPersistent ->
-    true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION
+    {MsgOrId, DiskWriteCount1} =
+        case persist_to(MsgStatus) of
+            msg_store   -> {MsgId, DiskWriteCount};
+            queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+        end,
     IndexState1 = rabbit_queue_index:publish(
-                    MsgId, SeqId, MsgProps, IsPersistent, IndexState),
-    {MsgStatus #msg_status { index_on_disk = true },
-     maybe_write_delivered(IsDelivered, SeqId, IndexState1)};
-maybe_write_index_to_disk(_Force, MsgStatus, IndexState) ->
-    {MsgStatus, IndexState}.
-
-maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus,
-                    State = #vqstate { index_state       = IndexState,
-                                       msg_store_clients = MSCState }) ->
-    MsgStatus1 = maybe_write_msg_to_disk(ForceMsg, MsgStatus, MSCState),
-    {MsgStatus2, IndexState1} =
-        maybe_write_index_to_disk(ForceIndex, MsgStatus1, IndexState),
-    {MsgStatus2, State #vqstate { index_state = IndexState1 }}.
+                    MsgOrId, SeqId, MsgProps, IsPersistent, TargetRamCount,
+                    IndexState),
+    IndexState2 = maybe_write_delivered(IsDelivered, SeqId, IndexState1),
+    {MsgStatus#msg_status{index_on_disk = true},
+     State#vqstate{index_state      = IndexState2,
+                   disk_write_count = DiskWriteCount1}};
+
+maybe_write_index_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
+
+maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
+    {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
+    maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1).
+
+determine_persist_to(#basic_message{
+                        content = #content{properties     = Props,
+                                           properties_bin = PropsBin}},
+                     #message_properties{size = BodySize}) ->
+    {ok, IndexMaxSize} = application:get_env(
+                           rabbit, queue_index_embed_msgs_below),
+    %% The >= is so that you can set the env to 0 and never persist
+    %% to the index.
+    %%
+    %% We want this to be fast, so we avoid size(term_to_binary())
+    %% here, or using the term size estimation from truncate.erl, both
+    %% of which are too slow. So instead, if the message body size
+    %% goes over the limit then we avoid any other checks.
+    %%
+    %% If it doesn't we need to decide if the properties will push
+    %% it past the limit. If we have the encoded properties (usual
+    %% case) we can just check their size. If we don't (message came
+    %% via the direct client), we make a guess based on the number of
+    %% headers.
+    case BodySize >= IndexMaxSize of
+        true  -> msg_store;
+        false -> Est = case is_binary(PropsBin) of
+                           true  -> BodySize + size(PropsBin);
+                           false -> #'P_basic'{headers = Hs} = Props,
+                                    case Hs of
+                                        undefined -> 0;
+                                        _         -> length(Hs)
+                                    end * ?HEADER_GUESS_SIZE + BodySize
+                       end,
+                 case Est >= IndexMaxSize of
+                     true  -> msg_store;
+                     false -> queue_index
+                 end
+    end.
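Note (not part of the patch): determine_persist_to/2 above decides, per message, whether the payload goes to the message store or is embedded in the queue index: anything whose body plus (estimated) properties is at or above the rabbit application's queue_index_embed_msgs_below value goes to the store, smaller messages go to the index, and setting the value to 0 disables embedding entirely (hence the >=). A configuration sketch in classic rabbitmq.config syntax; 8192 is an arbitrary example value, and the shipped default (believed to be 4096 bytes) should be checked against the docs for this release:

    %% rabbitmq.config sketch: embed messages smaller than 8 kB in the
    %% queue index; set to 0 to always use the message store.
    [
     {rabbit, [{queue_index_embed_msgs_below, 8192}]}
    ].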
+
+persist_to(#msg_status{persist_to = To}) -> To.
+
+prepare_to_store(Msg) ->
+    Msg#basic_message{
+      %% don't persist any recoverable decoded properties
+      content = rabbit_binary_parser:clear_decoded_content(
+                  Msg #basic_message.content)}.
 
 %%----------------------------------------------------------------------------
 %% Internal gubbins for acks
 %%----------------------------------------------------------------------------
 
-record_pending_ack(#msg_status { seq_id = SeqId, msg = Msg } = MsgStatus,
+record_pending_ack(#msg_status { seq_id = SeqId } = MsgStatus,
                    State = #vqstate { ram_pending_ack  = RPA,
                                       disk_pending_ack = DPA,
+                                      qi_pending_ack   = QPA,
                                       ack_in_counter   = AckInCount}) ->
-    {RPA1, DPA1} =
-        case Msg of
-            undefined -> {RPA, gb_trees:insert(SeqId, MsgStatus, DPA)};
-            _         -> {gb_trees:insert(SeqId, MsgStatus, RPA), DPA}
+    Insert = fun (Tree) -> gb_trees:insert(SeqId, MsgStatus, Tree) end,
+    {RPA1, DPA1, QPA1} =
+        case {msg_in_ram(MsgStatus), persist_to(MsgStatus)} of
+            {false, _}           -> {RPA, Insert(DPA), QPA};
+            {_,     queue_index} -> {RPA, DPA, Insert(QPA)};
+            {_,     msg_store}   -> {Insert(RPA), DPA, QPA}
         end,
     State #vqstate { ram_pending_ack  = RPA1,
                      disk_pending_ack = DPA1,
+                     qi_pending_ack   = QPA1,
                      ack_in_counter   = AckInCount + 1}.
 
 lookup_pending_ack(SeqId, #vqstate { ram_pending_ack  = RPA,
-                                     disk_pending_ack = DPA }) ->
+                                     disk_pending_ack = DPA,
+                                     qi_pending_ack   = QPA}) ->
     case gb_trees:lookup(SeqId, RPA) of
         {value, V} -> V;
-        none       -> gb_trees:get(SeqId, DPA)
+        none       -> case gb_trees:lookup(SeqId, DPA) of
+                          {value, V} -> V;
+                          none       -> gb_trees:get(SeqId, QPA)
+                      end
     end.
 
-remove_pending_ack(SeqId, State = #vqstate { ram_pending_ack  = RPA,
-                                             disk_pending_ack = DPA }) ->
+%% First parameter = UpdateStats
+remove_pending_ack(true, SeqId, State) ->
+    {MsgStatus, State1} = remove_pending_ack(false, SeqId, State),
+    {MsgStatus, stats({0, -1}, {MsgStatus, none}, State1)};
+remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack  = RPA,
+                                                  disk_pending_ack = DPA,
+                                                  qi_pending_ack   = QPA}) ->
     case gb_trees:lookup(SeqId, RPA) of
         {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA),
                       {V, State #vqstate { ram_pending_ack = RPA1 }};
-        none       -> DPA1 = gb_trees:delete(SeqId, DPA),
-                      {gb_trees:get(SeqId, DPA),
-                       State #vqstate { disk_pending_ack = DPA1 }}
+        none       -> case gb_trees:lookup(SeqId, DPA) of
+                          {value, V} ->
+                              DPA1 = gb_trees:delete(SeqId, DPA),
+                              {V, State#vqstate{disk_pending_ack = DPA1}};
+                          none ->
+                              QPA1 = gb_trees:delete(SeqId, QPA),
+                              {gb_trees:get(SeqId, QPA),
+                               State#vqstate{qi_pending_ack = QPA1}}
+                      end
     end.
 
 purge_pending_ack(KeepPersistent,
                   State = #vqstate { ram_pending_ack   = RPA,
                                      disk_pending_ack  = DPA,
+                                     qi_pending_ack    = QPA,
                                      index_state       = IndexState,
                                      msg_store_clients = MSCState }) ->
     F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
     {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
         rabbit_misc:gb_trees_fold(
-          F, rabbit_misc:gb_trees_fold(F, accumulate_ack_init(), RPA), DPA),
+          F, rabbit_misc:gb_trees_fold(
+               F,  rabbit_misc:gb_trees_fold(
+                     F, accumulate_ack_init(), RPA), DPA), QPA),
     State1 = State #vqstate { ram_pending_ack  = gb_trees:empty(),
-                              disk_pending_ack = gb_trees:empty() },
+                              disk_pending_ack = gb_trees:empty(),
+                              qi_pending_ack   = gb_trees:empty()},
 
     case KeepPersistent of
         true  -> case orddict:find(false, MsgIdsByStore) of
@@ -1328,22 +1532,16 @@ accumulate_ack_init() -> {[], orddict:new(), []}.
 accumulate_ack(#msg_status { seq_id        = SeqId,
                              msg_id        = MsgId,
                              is_persistent = IsPersistent,
-                             msg_on_disk   = MsgOnDisk,
+                             msg_in_store  = MsgInStore,
                              index_on_disk = IndexOnDisk },
                {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) ->
     {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
-     case MsgOnDisk of
+     case MsgInStore of
          true  -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
          false -> MsgIdsByStore
      end,
      [MsgId | AllMsgIds]}.
 
-find_persistent_count(LensByStore) ->
-    case orddict:find(true, LensByStore) of
-        error     -> 0;
-        {ok, Len} -> Len
-    end.
-
 %%----------------------------------------------------------------------------
 %% Internal plumbing for confirms (aka publisher acks)
 %%----------------------------------------------------------------------------
@@ -1385,19 +1583,25 @@ msg_indices_written_to_disk(Callback, MsgIdSet) ->
                                            gb_sets:union(MIOD, Confirmed) })
              end).
 
+msgs_and_indices_written_to_disk(Callback, MsgIdSet) ->
+    Callback(?MODULE,
+             fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end).
+
 %%----------------------------------------------------------------------------
 %% Internal plumbing for requeue
 %%----------------------------------------------------------------------------
 
 publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) ->
     {Msg, State1} = read_msg(MsgStatus, State),
-    {MsgStatus#msg_status { msg = Msg }, inc_ram_msg_count(State1)};
+    MsgStatus1 = MsgStatus#msg_status { msg = Msg },
+    {MsgStatus1, stats({1, -1}, {MsgStatus, MsgStatus1}, State1)};
 publish_alpha(MsgStatus, State) ->
-    {MsgStatus, inc_ram_msg_count(State)}.
+    {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, State)}.
 
 publish_beta(MsgStatus, State) ->
     {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State),
-    {m(trim_msg_status(MsgStatus1)), State1}.
+    MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+    {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, State1)}.
 
 %% Rebuild queue, inserting sequence ids to maintain ordering
 queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) ->
@@ -1433,13 +1637,14 @@ delta_merge(SeqIds, Delta, MsgIds, State) ->
                             msg_from_pending_ack(SeqId, State0),
                         {_MsgStatus, State2} =
                             maybe_write_to_disk(true, true, MsgStatus, State1),
-                        {expand_delta(SeqId, Delta0), [MsgId | MsgIds0], State2}
+                        {expand_delta(SeqId, Delta0), [MsgId | MsgIds0],
+                         stats({1, -1}, {MsgStatus, none}, State2)}
                 end, {Delta, MsgIds, State}, SeqIds).
 
 %% Mostly opposite of record_pending_ack/2
 msg_from_pending_ack(SeqId, State) ->
     {#msg_status { msg_props = MsgProps } = MsgStatus, State1} =
-        remove_pending_ack(SeqId, State),
+        remove_pending_ack(false, SeqId, State),
     {MsgStatus #msg_status {
        msg_props = MsgProps #message_properties { needs_confirming = false } },
      State1}.
@@ -1463,6 +1668,9 @@ ram_ack_iterator(State) ->
 disk_ack_iterator(State) ->
     {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}.
 
+qi_ack_iterator(State) ->
+    {ack, gb_trees:iterator(State#vqstate.qi_pending_ack)}.
+
 msg_iterator(State) -> istate(start, State).
 
 istate(start, State) -> {q4,    State#vqstate.q4,    State};
@@ -1492,7 +1700,8 @@ next({delta, Delta, [], State}, IndexState) ->
     next({delta, Delta, State}, IndexState);
 next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
     case (gb_trees:is_defined(SeqId, State#vqstate.ram_pending_ack) orelse
-          gb_trees:is_defined(SeqId, State#vqstate.disk_pending_ack)) of
+          gb_trees:is_defined(SeqId, State#vqstate.disk_pending_ack) orelse
+          gb_trees:is_defined(SeqId, State#vqstate.qi_pending_ack)) of
         false -> Next = {delta, Delta, Rest, State},
                  {value, beta_msg_status(M), false, Next, IndexState};
         true  -> next({delta, Delta, Rest, State}, IndexState)
@@ -1589,10 +1798,12 @@ limit_ram_acks(Quota, State = #vqstate { ram_pending_ack  = RPA,
             {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
             {MsgStatus1, State1} =
                 maybe_write_to_disk(true, false, MsgStatus, State),
-            DPA1 = gb_trees:insert(SeqId, m(trim_msg_status(MsgStatus1)), DPA),
+            MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+            DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA),
             limit_ram_acks(Quota - 1,
-                           State1 #vqstate { ram_pending_ack  = RPA1,
-                                             disk_pending_ack = DPA1 })
+                           stats({0, 0}, {MsgStatus, MsgStatus2},
+                                 State1 #vqstate { ram_pending_ack  = RPA1,
+                                                   disk_pending_ack = DPA1 }))
     end.
 
 permitted_beta_count(#vqstate { len = 0 }) ->
@@ -1653,8 +1864,12 @@ maybe_deltas_to_betas(State = #vqstate {
                         delta                = Delta,
                         q3                   = Q3,
                         index_state          = IndexState,
+                        ram_msg_count        = RamMsgCount,
+                        ram_bytes            = RamBytes,
                         ram_pending_ack      = RPA,
                         disk_pending_ack     = DPA,
+                        qi_pending_ack       = QPA,
+                        disk_read_count      = DiskReadCount,
                         transient_threshold  = TransientThreshold }) ->
     #delta { start_seq_id = DeltaSeqId,
              count        = DeltaCount,
@@ -1664,9 +1879,13 @@ maybe_deltas_to_betas(State = #vqstate {
                    DeltaSeqIdEnd]),
     {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
                                                   IndexState),
-    {Q3a, IndexState2} = betas_from_index_entries(List, TransientThreshold,
-                                                  RPA, DPA, IndexState1),
-    State1 = State #vqstate { index_state = IndexState2 },
+    {Q3a, RamCountsInc, RamBytesInc, IndexState2} =
+        betas_from_index_entries(List, TransientThreshold,
+                                 RPA, DPA, QPA, IndexState1),
+    State1 = State #vqstate { index_state       = IndexState2,
+                              ram_msg_count     = RamMsgCount   + RamCountsInc,
+                              ram_bytes         = RamBytes      + RamBytesInc,
+                              disk_read_count   = DiskReadCount + RamCountsInc},
     case ?QUEUE:len(Q3a) of
         0 ->
             %% we ignored every message in the segment due to it being
@@ -1724,23 +1943,21 @@ push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
                      {empty, _Q} ->
                          {Quota, State};
                      {{value, MsgStatus}, Qa} ->
-                         {MsgStatus1 = #msg_status { msg_on_disk = true },
-                          State1 = #vqstate { ram_msg_count = RamMsgCount }} =
+                         {MsgStatus1, State1} =
                              maybe_write_to_disk(true, false, MsgStatus, State),
                          MsgStatus2 = m(trim_msg_status(MsgStatus1)),
-                         State2 = Consumer(MsgStatus2, Qa,
-                                           State1 #vqstate {
-                                             ram_msg_count = RamMsgCount - 1 }),
+                         State2 = stats(
+                                    ready0, {MsgStatus, MsgStatus2}, State1),
+                         State3 = Consumer(MsgStatus2, Qa, State2),
                          push_alphas_to_betas(Generator, Consumer, Quota - 1,
-                                              Qa, State2)
+                                              Qa, State3)
                  end
     end.
 
-push_betas_to_deltas(Quota, State = #vqstate { q2          = Q2,
-                                               delta       = Delta,
-                                               q3          = Q3,
-                                               index_state = IndexState }) ->
-    PushState = {Quota, Delta, IndexState},
+push_betas_to_deltas(Quota, State = #vqstate { q2    = Q2,
+                                               delta = Delta,
+                                               q3    = Q3}) ->
+    PushState = {Quota, Delta, State},
     {Q3a, PushState1} = push_betas_to_deltas(
                           fun ?QUEUE:out_r/1,
                           fun rabbit_queue_index:next_segment_boundary/1,
@@ -1749,11 +1966,10 @@ push_betas_to_deltas(Quota, State = #vqstate { q2          = Q2,
                           fun ?QUEUE:out/1,
                           fun (Q2MinSeqId) -> Q2MinSeqId end,
                           Q2, PushState1),
-    {_, Delta1, IndexState1} = PushState2,
-    State #vqstate { q2          = Q2a,
-                     delta       = Delta1,
-                     q3          = Q3a,
-                     index_state = IndexState1 }.
+    {_, Delta1, State1} = PushState2,
+    State1 #vqstate { q2    = Q2a,
+                      delta = Delta1,
+                      q3    = Q3a }.
 
 push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
     case ?QUEUE:is_empty(Q) of
@@ -1769,11 +1985,9 @@ push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
             end
     end.
 
-push_betas_to_deltas1(_Generator, _Limit, Q,
-                      {0, _Delta, _IndexState} = PushState) ->
+push_betas_to_deltas1(_Generator, _Limit, Q, {0, _Delta, _State} = PushState) ->
     {Q, PushState};
-push_betas_to_deltas1(Generator, Limit, Q,
-                      {Quota, Delta, IndexState} = PushState) ->
+push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State} = PushState) ->
     case Generator(Q) of
         {empty, _Q} ->
             {Q, PushState};
@@ -1781,11 +1995,12 @@ push_betas_to_deltas1(Generator, Limit, Q,
           when SeqId < Limit ->
             {Q, PushState};
         {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
-            {#msg_status { index_on_disk = true }, IndexState1} =
-                maybe_write_index_to_disk(true, MsgStatus, IndexState),
+            {#msg_status { index_on_disk = true }, State1} =
+                maybe_write_index_to_disk(true, MsgStatus, State),
+            State2 = stats(ready0, {MsgStatus, none}, State1),
             Delta1 = expand_delta(SeqId, Delta),
             push_betas_to_deltas1(Generator, Limit, Qa,
-                                  {Quota - 1, Delta1, IndexState1})
+                                  {Quota - 1, Delta1, State2})
     end.
 
 %%----------------------------------------------------------------------------
index d943b5998f14418696df0304ae9c1128c824b740..d3b2be1b5004ebadfcf8416c7d7ddfe05353577c 100644

@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_version).
@@ -114,8 +114,8 @@ upgrades_required(Scope) ->
 
 with_upgrade_graph(Fun, Scope) ->
     case rabbit_misc:build_acyclic_graph(
-           fun (Module, Steps) -> vertices(Module, Steps, Scope) end,
-           fun (Module, Steps) -> edges(Module, Steps, Scope) end,
+           fun ({_App, Module, Steps}) -> vertices(Module, Steps, Scope) end,
+           fun ({_App, Module, Steps}) -> edges(Module, Steps, Scope) end,
            rabbit_misc:all_module_attributes(rabbit_upgrade)) of
         {ok, G} -> try
                        Fun(G)
@@ -161,7 +161,7 @@ heads(G) ->
 
 categorise_by_scope(Version) when is_list(Version) ->
     Categorised =
-        [{Scope, Name} || {_Module, Attributes} <-
+        [{Scope, Name} || {_App, _Module, Attributes} <-
                               rabbit_misc:all_module_attributes(rabbit_upgrade),
                           {Name, Scope, _Requires} <- Attributes,
                           lists:member(Name, Version)],
index cfa3add44aea5a270842c1f64f84633bf70d3a3e..9b627adf5d20daaeda772c0bae7b2075bd590039 100644
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_vhost).
@@ -94,13 +94,12 @@ delete(VHostPath) ->
     [ok = Fun() || Fun <- Funs],
     ok.
 
-assert_benign(ok)                   -> ok;
-assert_benign({ok, _})              -> ok;
-assert_benign({error, not_found})   -> ok;
-assert_benign({error, {absent, Q}}) ->
-    %% We have a durable queue on a down node. Removing the mnesia
-    %% entries here is safe. If/when the down node restarts, it will
-    %% clear out the on-disk storage of the queue.
+assert_benign(ok)                 -> ok;
+assert_benign({ok, _})            -> ok;
+assert_benign({error, not_found}) -> ok;
+assert_benign({error, {absent, Q, _}}) ->
+    %% Removing the mnesia entries here is safe. If/when the down node
+    %% restarts, it will clear out the on-disk storage of the queue.
     case rabbit_amqqueue:internal_delete(Q#amqqueue.name) of
         ok                 -> ok;
         {error, not_found} -> ok
index 6fe65c12a2ff0765b39d3c89ff566fa14c2b1ba0..534a8883e18237f67b458e52831fc4d7dbfeebe0 100644
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_vm).
 
--export([memory/0]).
+-export([memory/0, binary/0]).
 
 -define(MAGIC_PLUGINS, ["mochiweb", "webmachine", "cowboy", "sockjs",
                         "rfc4627_jsonrpc"]).
@@ -26,6 +26,7 @@
 -ifdef(use_specs).
 
 -spec(memory/0 :: () -> rabbit_types:infos()).
+-spec(binary/0 :: () -> rabbit_types:infos()).
 
 -endif.
 
 
 %% Like erlang:memory(), but with awareness of rabbit-y things
 memory() ->
-    ConnProcs     = [rabbit_tcp_client_sup, ssl_connection_sup, amqp_sup],
-    QProcs        = [rabbit_amqqueue_sup, rabbit_mirror_queue_slave_sup],
-    MsgIndexProcs = [msg_store_transient, msg_store_persistent],
-    MgmtDbProcs   = [rabbit_mgmt_sup_sup],
-    PluginProcs   = plugin_sups(),
-
-    All = [ConnProcs, QProcs, MsgIndexProcs, MgmtDbProcs, PluginProcs],
-
-    {Sums, _Other} = sum_processes(lists:append(All), [memory]),
+    All = interesting_sups(),
+    {Sums, _Other} = sum_processes(
+                       lists:append(All), distinguishers(), [memory]),
 
-    [Conns, Qs, MsgIndexProc, MgmtDbProc, Plugins] =
-        [aggregate_memory(Names, Sums) || Names <- All],
+    [Qs, QsSlave, ConnsReader, ConnsWriter, ConnsChannel, ConnsOther,
+     MsgIndexProc, MgmtDbProc, Plugins] =
+        [aggregate(Names, Sums, memory, fun (X) -> X end)
+         || Names <- distinguished_interesting_sups()],
 
     Mnesia       = mnesia_memory(),
-    MsgIndexETS  = ets_memory(rabbit_msg_store_ets_index),
-    MgmtDbETS    = ets_memory(rabbit_mgmt_db),
+    MsgIndexETS  = ets_memory([msg_store_persistent, msg_store_transient]),
+    MgmtDbETS    = ets_memory([rabbit_mgmt_db]),
 
     [{total,     Total},
      {processes, Processes},
@@ -59,27 +56,58 @@ memory() ->
      {system,    System}] =
         erlang:memory([total, processes, ets, atom, binary, code, system]),
 
-    OtherProc = Processes - Conns - Qs - MsgIndexProc - Plugins - MgmtDbProc,
-
-    [{total,            Total},
-     {connection_procs, Conns},
-     {queue_procs,      Qs},
-     {plugins,          Plugins},
-     {other_proc,       lists:max([0, OtherProc])}, %% [1]
-     {mnesia,           Mnesia},
-     {mgmt_db,          MgmtDbETS + MgmtDbProc},
-     {msg_index,        MsgIndexETS + MsgIndexProc},
-     {other_ets,        ETS - Mnesia - MsgIndexETS - MgmtDbETS},
-     {binary,           Bin},
-     {code,             Code},
-     {atom,             Atom},
-     {other_system,     System - ETS - Atom - Bin - Code}].
+    OtherProc = Processes
+        - ConnsReader - ConnsWriter - ConnsChannel - ConnsOther
+        - Qs - QsSlave - MsgIndexProc - Plugins - MgmtDbProc,
+
+    [{total,              Total},
+     {connection_readers,  ConnsReader},
+     {connection_writers,  ConnsWriter},
+     {connection_channels, ConnsChannel},
+     {connection_other,    ConnsOther},
+     {queue_procs,         Qs},
+     {queue_slave_procs,   QsSlave},
+     {plugins,             Plugins},
+     {other_proc,          lists:max([0, OtherProc])}, %% [1]
+     {mnesia,              Mnesia},
+     {mgmt_db,             MgmtDbETS + MgmtDbProc},
+     {msg_index,           MsgIndexETS + MsgIndexProc},
+     {other_ets,           ETS - Mnesia - MsgIndexETS - MgmtDbETS},
+     {binary,              Bin},
+     {code,                Code},
+     {atom,                Atom},
+     {other_system,        System - ETS - Atom - Bin - Code}].
 
 %% [1] - erlang:memory(processes) can be less than the sum of its
 %% parts. Rather than display something nonsensical, just silence any
 %% claims about negative memory. See
 %% http://erlang.org/pipermail/erlang-questions/2012-September/069320.html
 
+binary() ->
+    All = interesting_sups(),
+    {Sums, Rest} =
+        sum_processes(
+          lists:append(All),
+          fun (binary, Info, Acc) ->
+                  lists:foldl(fun ({Ptr, Sz, _RefCnt}, Acc0) ->
+                                      sets:add_element({Ptr, Sz}, Acc0)
+                              end, Acc, Info)
+          end, distinguishers(), [{binary, sets:new()}]),
+    [Other, Qs, QsSlave, ConnsReader, ConnsWriter, ConnsChannel, ConnsOther,
+     MsgIndexProc, MgmtDbProc, Plugins] =
+        [aggregate(Names, [{other, Rest} | Sums], binary, fun sum_binary/1)
+         || Names <- [[other] | distinguished_interesting_sups()]],
+    [{connection_readers,  ConnsReader},
+     {connection_writers,  ConnsWriter},
+     {connection_channels, ConnsChannel},
+     {connection_other,    ConnsOther},
+     {queue_procs,         Qs},
+     {queue_slave_procs,   QsSlave},
+     {plugins,             Plugins},
+     {mgmt_db,             MgmtDbProc},
+     {msg_index,           MsgIndexProc},
+     {other,               Other}].
+
 %%----------------------------------------------------------------------------
 
 mnesia_memory() ->
@@ -89,13 +117,38 @@ mnesia_memory() ->
         _   -> 0
     end.
 
-ets_memory(Name) ->
+ets_memory(OwnerNames) ->
+    Owners = [whereis(N) || N <- OwnerNames],
     lists:sum([bytes(ets:info(T, memory)) || T <- ets:all(),
-                                             N <- [ets:info(T, name)],
-                                             N =:= Name]).
+                                             O <- [ets:info(T, owner)],
+                                             lists:member(O, Owners)]).
 
 bytes(Words) ->  Words * erlang:system_info(wordsize).
 
+interesting_sups() ->
+    [[rabbit_amqqueue_sup_sup], conn_sups() | interesting_sups0()].
+
+interesting_sups0() ->
+    MsgIndexProcs = [msg_store_transient, msg_store_persistent],
+    MgmtDbProcs   = [rabbit_mgmt_sup_sup],
+    PluginProcs   = plugin_sups(),
+    [MsgIndexProcs, MgmtDbProcs, PluginProcs].
+
+conn_sups()     -> [rabbit_tcp_client_sup, ssl_connection_sup, amqp_sup].
+conn_sups(With) -> [{Sup, With} || Sup <- conn_sups()].
+
+distinguishers() -> [{rabbit_amqqueue_sup_sup, fun queue_type/1} |
+                     conn_sups(fun conn_type/1)].
+
+distinguished_interesting_sups() ->
+    [[{rabbit_amqqueue_sup_sup, master}],
+     [{rabbit_amqqueue_sup_sup, slave}],
+     conn_sups(reader),
+     conn_sups(writer),
+     conn_sups(channel),
+     conn_sups(other)]
+        ++ interesting_sups0().
+
 plugin_sups() ->
     lists:append([plugin_sup(App) ||
                      {App, _, _} <- rabbit_misc:which_applications(),
@@ -120,13 +173,31 @@ process_name(Pid) ->
 is_plugin("rabbitmq_" ++ _) -> true;
 is_plugin(App)              -> lists:member(App, ?MAGIC_PLUGINS).
 
-aggregate_memory(Names, Sums) ->
-    lists:sum([extract_memory(Name, Sums) || Name <- Names]).
+aggregate(Names, Sums, Key, Fun) ->
+    lists:sum([extract(Name, Sums, Key, Fun) || Name <- Names]).
+
+extract(Name, Sums, Key, Fun) ->
+    case keyfind(Name, Sums) of
+        {value, Accs} -> Fun(keyfetch(Key, Accs));
+        false         -> 0
+    end.
+
+sum_binary(Set) ->
+    sets:fold(fun({_Pt, Sz}, Acc) -> Acc + Sz end, 0, Set).
 
-extract_memory(Name, Sums) ->
-    {value, {_, Accs}} = lists:keysearch(Name, 1, Sums),
-    {value, {memory, V}} = lists:keysearch(memory, 1, Accs),
-    V.
+queue_type(PDict) ->
+    case keyfind(process_name, PDict) of
+        {value, {rabbit_mirror_queue_slave, _}} -> slave;
+        _                                       -> master
+    end.
+
+conn_type(PDict) ->
+    case keyfind(process_name, PDict) of
+        {value, {rabbit_reader,  _}} -> reader;
+        {value, {rabbit_writer,  _}} -> writer;
+        {value, {rabbit_channel, _}} -> channel;
+        _                            -> other
+    end.
 
 %%----------------------------------------------------------------------------
 
@@ -139,14 +210,17 @@ extract_memory(Name, Sums) ->
 -type(info_item() :: {info_key(), info_value()}).
 -type(accumulate() :: fun ((info_key(), info_value(), info_value()) ->
                                   info_value())).
--spec(sum_processes/2 :: ([process()], [info_key()]) ->
+-type(distinguisher() :: fun (([{term(), term()}]) -> atom())).
+-type(distinguishers() :: [{info_key(), distinguisher()}]).
+-spec(sum_processes/3 :: ([process()], distinguishers(), [info_key()]) ->
                               {[{process(), [info_item()]}], [info_item()]}).
--spec(sum_processes/3 :: ([process()], accumulate(), [info_item()]) ->
+-spec(sum_processes/4 :: ([process()], accumulate(), distinguishers(),
+                          [info_item()]) ->
                               {[{process(), [info_item()]}], [info_item()]}).
 -endif.
 
-sum_processes(Names, Items) ->
-    sum_processes(Names, fun (_, X, Y) -> X + Y end,
+sum_processes(Names, Distinguishers, Items) ->
+    sum_processes(Names, fun (_, X, Y) -> X + Y end, Distinguishers,
                   [{Item, 0} || Item <- Items]).
 
 %% summarize the process_info of all processes based on their
@@ -180,10 +254,8 @@ sum_processes(Names, Items) ->
 %% these must match whatever is contained in the '$ancestor' process
 %% dictionary entry. Generally that means for all registered processes
 %% the name should be used.
-sum_processes(Names, Fun, Acc0) ->
-    Items = [Item || {Item, _Val0} <- Acc0],
-    Acc0Dict  = orddict:from_list(Acc0),
-    NameAccs0 = orddict:from_list([{Name, Acc0Dict} || Name <- Names]),
+sum_processes(Names, Fun, Distinguishers, Acc0) ->
+    Items = [Item || {Item, _Blank0} <- Acc0],
     {NameAccs, OtherAcc} =
         lists:foldl(
           fun (Pid, Acc) ->
@@ -199,10 +271,15 @@ sum_processes(Names, Fun, Acc0) ->
                                       [] -> [];
                                       N  -> [N]
                                   end,
-                          accumulate(find_ancestor(Extra, D, Names), Fun,
-                                     orddict:from_list(Vals), Acc)
+                          Name0 = find_ancestor(Extra, D, Names),
+                          Name = case keyfind(Name0, Distinguishers) of
+                                     {value, DistFun} -> {Name0, DistFun(D)};
+                                     false            -> Name0
+                                 end,
+                          accumulate(
+                            Name, Fun, orddict:from_list(Vals), Acc, Acc0)
                   end
-          end, {NameAccs0, Acc0Dict}, processes()),
+          end, {orddict:new(), Acc0}, processes()),
     %% these conversions aren't strictly necessary; we do them simply
     %% for the sake of encapsulating the representation.
     {[{Name, orddict:to_list(Accs)} ||
@@ -210,9 +287,9 @@ sum_processes(Names, Fun, Acc0) ->
      orddict:to_list(OtherAcc)}.
 
 find_ancestor(Extra, D, Names) ->
-    Ancestors = case lists:keysearch('$ancestors', 1, D) of
-                    {value, {_, Ancs}} -> Ancs;
-                    false              -> []
+    Ancestors = case keyfind('$ancestors', D) of
+                    {value, Ancs} -> Ancs;
+                    false         -> []
                 end,
     case lists:splitwith(fun (A) -> not lists:member(A, Names) end,
                          Extra ++ Ancestors) of
@@ -220,8 +297,19 @@ find_ancestor(Extra, D, Names) ->
         {_, [Name | _]} -> Name
     end.
 
-accumulate(undefined, Fun, ValsDict, {NameAccs, OtherAcc}) ->
+accumulate(undefined, Fun, ValsDict, {NameAccs, OtherAcc}, _Acc0) ->
     {NameAccs, orddict:merge(Fun, ValsDict, OtherAcc)};
-accumulate(Name,      Fun, ValsDict, {NameAccs, OtherAcc}) ->
+accumulate(Name,      Fun, ValsDict, {NameAccs, OtherAcc}, Acc0) ->
     F = fun (NameAcc) -> orddict:merge(Fun, ValsDict, NameAcc) end,
-    {orddict:update(Name, F, NameAccs), OtherAcc}.
+    {case orddict:is_key(Name, NameAccs) of
+         true  -> orddict:update(Name, F,       NameAccs);
+         false -> orddict:store( Name, F(Acc0), NameAccs)
+     end, OtherAcc}.
+
+keyfetch(K, L) -> {value, {_, V}} = lists:keysearch(K, 1, L),
+                  V.
+
+keyfind(K, L) -> case lists:keysearch(K, 1, L) of
+                     {value, {_, V}} -> {value, V};
+                     false           -> false
+                 end.
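Reviewer note: the new binary/0 above folds each process's {Ptr, Sz, _RefCnt} binary info into a set keyed on {Ptr, Sz}, so a reference-counted binary shared by several processes in the same category is counted only once. A minimal standalone sketch of that deduplication step (illustration only, not part of the patch; the pointer/size values are invented):

-module(binary_dedup_sketch).
-export([demo/0]).

%% Pretend process_info(P, binary) returned these {Ptr, Size, RefCnt}
%% tuples for two processes that share one 1 MiB refc binary.
demo() ->
    P1 = [{16#1000, 1048576, 2}, {16#2000, 512, 1}],
    P2 = [{16#1000, 1048576, 2}],
    Set = lists:foldl(fun ({Ptr, Sz, _RefCnt}, Acc) ->
                              sets:add_element({Ptr, Sz}, Acc)
                      end, sets:new(), P1 ++ P2),
    %% Sums to 1049088, not 2097664: the shared binary contributes once.
    sets:fold(fun ({_Ptr, Sz}, Acc) -> Acc + Sz end, 0, Set).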
index 1882696efb75d7c73f382bf62bb05cc8be44cd28..7cba7170a4cdc7c797fb55d351b38547ed4f4c62 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(rabbit_writer).
@@ -308,6 +308,7 @@ internal_send_command_async(MethodRecord, Content,
                                             pending   = Pending}) ->
     Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax,
                              Protocol),
+    rabbit_basic:maybe_gc_large_msg(Content),
     maybe_flush(State#wstate{pending = [Frames | Pending]}).
 
 %% This magic number is the tcp-over-ethernet MSS (1460) minus the
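Reviewer note: the only functional change in this hunk is the call to rabbit_basic:maybe_gc_large_msg(Content) once the frames have been assembled. The following is a rough sketch of the presumed intent only (an assumption, not the actual rabbit_basic implementation): after a large payload has been serialised, garbage-collect the calling process promptly so the reference to the large binary does not linger.

-module(gc_hint_sketch).
-export([maybe_gc/1]).

-define(LARGE_MSG_BYTES, 1048576). %% hypothetical threshold

%% Force a GC of the calling process after handling a large binary.
maybe_gc(Payload) when is_binary(Payload) ->
    case byte_size(Payload) > ?LARGE_MSG_BYTES of
        true  -> true = erlang:garbage_collect(), ok;
        false -> ok
    end.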
index fcfa90b632df1157f3eef4f83259066ed9741d70..1010bbb51db52bcaf20fe2ae474c6abb9af4c4de 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% Invoke callbacks on startup and termination.
index 4eafd3b1291c8f34016ec40f479b97ecb102ca5c..7b9421eb3ed44a9236401c9a549f1610adb5d497 100644 (file)
 -record(state, {name,
                strategy               :: strategy(),
                children = []          :: [child_rec()],
-               dynamics               :: ?DICT() | ?SET(),
+               dynamics               :: ?DICT:?DICT() | ?SETS:?SET(),
                intensity              :: non_neg_integer(),
                period                 :: pos_integer(),
                restarts = [],
            MaxT            :: non_neg_integer()},
            [ChildSpec :: child_spec()]}}
     | ignore.
+-else.
+
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+    [{init,1}];
+behaviour_info(_Other) ->
+    undefined.
+
 -endif.
 -define(restarting(_Pid_), {restarting,_Pid_}).
 
diff --git a/rabbitmq-server/src/supervisor2_tests.erl b/rabbitmq-server/src/supervisor2_tests.erl
deleted file mode 100644 (file)
index 4d362e5..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(supervisor2_tests).
--behaviour(supervisor2).
-
--export([test_all/0, start_link/0]).
--export([init/1]).
-
-test_all() ->
-    ok = check_shutdown(stop,    200, 200, 2000),
-    ok = check_shutdown(ignored,   1,   2, 2000).
-
-check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
-    {ok, Sup} = supervisor2:start_link(?MODULE, [SupTimeout]),
-    Res = lists:foldl(
-            fun (I, ok) ->
-                    TestSupPid = erlang:whereis(?MODULE),
-                    ChildPids =
-                        [begin
-                             {ok, ChildPid} =
-                                 supervisor2:start_child(TestSupPid, []),
-                             ChildPid
-                         end || _ <- lists:seq(1, ChildCount)],
-                    MRef = erlang:monitor(process, TestSupPid),
-                    [P ! SigStop || P <- ChildPids],
-                    ok = supervisor2:terminate_child(Sup, test_sup),
-                    {ok, _} = supervisor2:restart_child(Sup, test_sup),
-                    receive
-                        {'DOWN', MRef, process, TestSupPid, shutdown} ->
-                            ok;
-                        {'DOWN', MRef, process, TestSupPid, Reason} ->
-                            {error, {I, Reason}}
-                    end;
-                (_, R) ->
-                    R
-            end, ok, lists:seq(1, Iterations)),
-    unlink(Sup),
-    exit(Sup, shutdown),
-    Res.
-
-start_link() ->
-    Pid = spawn_link(fun () ->
-                             process_flag(trap_exit, true),
-                             receive stop -> ok end
-                     end),
-    {ok, Pid}.
-
-init([Timeout]) ->
-    {ok, {{one_for_one, 0, 1},
-          [{test_sup, {supervisor2, start_link,
-                       [{local, ?MODULE}, ?MODULE, []]},
-            transient, Timeout, supervisor, [?MODULE]}]}};
-init([]) ->
-    {ok, {{simple_one_for_one, 0, 1},
-          [{test_worker, {?MODULE, start_link, []},
-            temporary, 1000, worker, [?MODULE]}]}}.
index 047b85c5a3575373b879ccb421d1db3b6e1bc45f..75f216c3dd6cffbffc4fbc80def48770122b6813 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(tcp_acceptor).
index 10b10e4a02a746c052f0b4cb9ba3a863593d767a..22c886e0ab3f1f4c939e4d78b8c7dcdb8c4c0d8b 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(tcp_acceptor_sup).
index 7c464c6a268aa83ea6cbd40193755d62b06d7175..307249af09c8e2eaa86667722cf69913d9284bab 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(tcp_listener).
index b3e1c69b29700517f3643828732ef6cc497db94b..94bdecc28ceaa3408da908b4712d1464dd5c17c3 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(tcp_listener_sup).
diff --git a/rabbitmq-server/src/test_sup.erl b/rabbitmq-server/src/test_sup.erl
deleted file mode 100644 (file)
index d5b2a26..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(test_sup).
-
--behaviour(supervisor2).
-
--export([test_supervisor_delayed_restart/0,
-         init/1, start_child/0]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(test_supervisor_delayed_restart/0 :: () -> 'passed').
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-test_supervisor_delayed_restart() ->
-    passed = with_sup(simple_one_for_one,
-                      fun (SupPid) ->
-                              {ok, _ChildPid} =
-                                  supervisor2:start_child(SupPid, []),
-                              test_supervisor_delayed_restart(SupPid)
-                      end),
-    passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1).
-
-test_supervisor_delayed_restart(SupPid) ->
-    ok = ping_child(SupPid),
-    ok = exit_child(SupPid),
-    timer:sleep(100),
-    ok = ping_child(SupPid),
-    ok = exit_child(SupPid),
-    timer:sleep(100),
-    timeout = ping_child(SupPid),
-    timer:sleep(1010),
-    ok = ping_child(SupPid),
-    passed.
-
-with_sup(RestartStrategy, Fun) ->
-    {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]),
-    Res = Fun(SupPid),
-    unlink(SupPid),
-    exit(SupPid, shutdown),
-    Res.
-
-init([RestartStrategy]) ->
-    {ok, {{RestartStrategy, 1, 1},
-          [{test, {test_sup, start_child, []}, {permanent, 1},
-            16#ffffffff, worker, [test_sup]}]}}.
-
-start_child() ->
-    {ok, proc_lib:spawn_link(fun run_child/0)}.
-
-ping_child(SupPid) ->
-    Ref = make_ref(),
-    with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end),
-    receive {pong, Ref} -> ok
-    after 1000          -> timeout
-    end.
-
-exit_child(SupPid) ->
-    with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end),
-    ok.
-
-with_child_pid(SupPid, Fun) ->
-    case supervisor2:which_children(SupPid) of
-        [{_Id, undefined, worker, [test_sup]}] -> ok;
-        [{_Id,  ChildPid, worker, [test_sup]}] -> Fun(ChildPid);
-        []                                     -> ok
-    end.
-
-run_child() ->
-    receive {ping, Ref, Pid} -> Pid ! {pong, Ref},
-                                run_child()
-    end.
index 820af1bf86105f847a66f4d9654ff8d0428efc66..8feae35f88853ca5895dcab2209bd6be17741c94 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(truncate).
@@ -45,7 +45,7 @@ report(List, Params) when is_list(List) -> [case Item of
 report(Other, Params)                   -> term(Other, Params).
 
 term(Thing, {Max, {Content, Struct, ContentDec, StructDec}}) ->
-    case term_limit(Thing, Max) of
+    case exceeds_size(Thing, Max) of
         true  -> term(Thing, true, #params{content     = Content,
                                            struct      = Struct,
                                            content_dec = ContentDec,
@@ -93,7 +93,7 @@ shrink_list([H|T], #params{content     = Content,
 %% sizes. This is all going to be rather approximate; these sizes are
 %% probably not very "fair", but we are just trying to see whether we
 %% reach a fairly arbitrary limit anyway.
-term_limit(Thing, Max) ->
+exceeds_size(Thing, Max) ->
     case term_size(Thing, Max, erlang:system_info(wordsize)) of
         limit_exceeded -> true;
         _              -> false
index 948956a359c86d636ec238e86c1b566c942ef6cb..304518bc5eff85c6ce9021dbeefcbf63a26f79ab 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 %% In practice Erlang shouldn't be allowed to grow to more than a half
diff --git a/rabbitmq-server/src/vm_memory_monitor_tests.erl b/rabbitmq-server/src/vm_memory_monitor_tests.erl
deleted file mode 100644 (file)
index 1f7cea3..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(vm_memory_monitor_tests).
-
--export([all_tests/0]).
-
-%% ---------------------------------------------------------------------------
-%% Tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
-    lists:foreach(fun ({S, {K, V}}) ->
-                          {K, V} = vm_memory_monitor:parse_line_linux(S)
-                  end,
-                  [{"MemTotal:      0 kB",        {'MemTotal', 0}},
-                   {"MemTotal:      502968 kB  ", {'MemTotal', 515039232}},
-                   {"MemFree:         178232 kB", {'MemFree',  182509568}},
-                   {"MemTotal:         50296888", {'MemTotal', 50296888}},
-                   {"MemTotal         502968 kB", {'MemTotal', 515039232}},
-                   {"MemTotal     50296866   ",   {'MemTotal', 50296866}}]),
-    passed.
index b1dba5a2335fb405997754bb0de7cb2f5f74eacc..99b227e39275aedc605cd5141cff295fcbcda638 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(worker_pool).
 
 %% Generic worker pool manager.
 %%
-%% Supports nested submission of jobs (nested jobs always run
-%% immediately in current worker process).
+%% Submitted jobs are functions. They can be executed synchronously
+%% (using worker_pool:submit/1, worker_pool:submit/2) or asynchronously
+%% (using worker_pool:submit_async/1).
 %%
-%% Possible future enhancements:
+%% We typically use the worker pool if we want to limit the maximum
+%% parallelism of some job. We are not trying to dodge the cost of
+%% creating Erlang processes.
 %%
-%% 1. Allow priorities (basically, change the pending queue to a
-%% priority_queue).
+%% Supports nested submission of jobs and two execution modes:
+%% 'single' and 'reuse'. Jobs executed in 'single' mode are invoked in
+%% a one-off process. Those executed in 'reuse' mode are invoked in a
+%% worker process from the pool. Nested jobs are always executed
+%% immediately in the current worker process.
+%%
+%% 'single' mode is offered to work around a bug in Mnesia: after
+%% network partitions, reply messages for prior failed requests can be
+%% sent to Mnesia clients, and a reused worker pool process can crash
+%% on receiving one.
+%%
+%% Caller submissions are enqueued internally. When a worker process
+%% becomes available, it notifies the pool and is assigned a job to
+%% execute. If job execution fails with an error, no response is
+%% returned to the caller.
+%%
+%% Worker processes prioritise certain command-and-control messages
+%% from the pool.
+%%
+%% Future improvement points: job prioritisation.
 
 -behaviour(gen_server2).
 
--export([start_link/0, submit/1, submit_async/1, ready/1, idle/1]).
+-export([start_link/0, submit/1, submit/2, submit_async/1, ready/1,
+         idle/1]).
 
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
          terminate/2, code_change/3]).
@@ -41,6 +63,7 @@
 
 -spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
 -spec(submit/1 :: (fun (() -> A) | mfargs()) -> A).
+-spec(submit/2 :: (fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
 -spec(submit_async/1 :: (fun (() -> any()) | mfargs()) -> 'ok').
 -spec(ready/1 :: (pid()) -> 'ok').
 -spec(idle/1 :: (pid()) -> 'ok').
@@ -61,10 +84,14 @@ start_link() -> gen_server2:start_link({local, ?SERVER}, ?MODULE, [],
                                        [{timeout, infinity}]).
 
 submit(Fun) ->
+    submit(Fun, reuse).
+
+%% ProcessModel =:= single is for working around the mnesia_locker bug.
+submit(Fun, ProcessModel) ->
     case get(worker_pool_worker) of
         true -> worker_pool_worker:run(Fun);
         _    -> Pid = gen_server2:call(?SERVER, {next_free, self()}, infinity),
-                worker_pool_worker:submit(Pid, Fun)
+                worker_pool_worker:submit(Pid, Fun, ProcessModel)
     end.
 
 submit_async(Fun) -> gen_server2:cast(?SERVER, {run_async, Fun}).
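Reviewer note: a hypothetical usage sketch for the extended submission API described in the module comment above (the pool itself is assumed to already be running under rabbit's supervision tree):

-module(worker_pool_usage_sketch).
-export([demo/0]).

demo() ->
    Job = fun () -> lists:sum(lists:seq(1, 100)) end,
    %% Synchronous submission; runs in a reused pool worker by default.
    5050 = worker_pool:submit(Job),
    %% Same job, but executed in a one-off process ('single' mode), e.g.
    %% to sidestep the Mnesia stray-reply problem mentioned above.
    5050 = worker_pool:submit(Job, single),
    %% Fire-and-forget submission; returns 'ok' immediately.
    ok = worker_pool:submit_async(Job),
    ok.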
index 89d2ed46daaab79b9a91768f154b22ecc80d2cdc..99afd91ea58e39a04225b9f58c8e668ebb3035cc 100644 (file)
@@ -11,7 +11,7 @@
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(worker_pool_sup).
index beb95bc6319637ccb42296e72bc5f0ea5ca5c720..6e66d8518eec573e2272749b581670701084408f 100644 (file)
 %% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2014 GoPivotal, Inc.  All rights reserved.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
 -module(worker_pool_worker).
 
+%% Executes jobs (functions) submitted to a worker pool with worker_pool:submit/1,
+%% worker_pool:submit/2 or worker_pool:submit_async/1.
+%%
+%% See worker_pool for an overview.
+
 -behaviour(gen_server2).
 
--export([start_link/0, next_job_from/2, submit/2, submit_async/2, run/1]).
+-export([start_link/0, next_job_from/2, submit/3, submit_async/2, run/1]).
 
 -export([set_maximum_since_use/2]).
 
@@ -33,7 +38,7 @@
 
 -spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
 -spec(next_job_from/2 :: (pid(), pid()) -> 'ok').
--spec(submit/2 :: (pid(), fun (() -> A) | mfargs()) -> A).
+-spec(submit/3 :: (pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
 -spec(submit_async/2 :: (pid(), fun (() -> any()) | mfargs()) -> 'ok').
 -spec(run/1 :: (fun (() -> A)) -> A; (mfargs()) -> any()).
 -spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
@@ -53,8 +58,8 @@ start_link() ->
 next_job_from(Pid, CPid) ->
     gen_server2:cast(Pid, {next_job_from, CPid}).
 
-submit(Pid, Fun) ->
-    gen_server2:call(Pid, {submit, Fun, self()}, infinity).
+submit(Pid, Fun, ProcessModel) ->
+    gen_server2:call(Pid, {submit, Fun, self(), ProcessModel}, infinity).
 
 submit_async(Pid, Fun) ->
     gen_server2:cast(Pid, {submit_async, Fun}).
@@ -62,10 +67,22 @@ submit_async(Pid, Fun) ->
 set_maximum_since_use(Pid, Age) ->
     gen_server2:cast(Pid, {set_maximum_since_use, Age}).
 
-run({M, F, A}) ->
-    apply(M, F, A);
-run(Fun) ->
-    Fun().
+run({M, F, A}) -> apply(M, F, A);
+run(Fun)       -> Fun().
+
+run(Fun, reuse) ->
+    run(Fun);
+run(Fun, single) ->
+    Self = self(),
+    Ref = make_ref(),
+    spawn_link(fun () ->
+                       put(worker_pool_worker, true),
+                       Self ! {Ref, run(Fun)},
+                       unlink(Self)
+               end),
+    receive
+        {Ref, Res} -> Res
+    end.
 
 %%----------------------------------------------------------------------------
 
@@ -81,12 +98,12 @@ prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
 prioritise_cast({next_job_from, _CPid},        _Len, _State) -> 7;
 prioritise_cast(_Msg,                          _Len, _State) -> 0.
 
-handle_call({submit, Fun, CPid}, From, undefined) ->
-    {noreply, {job, CPid, From, Fun}, hibernate};
+handle_call({submit, Fun, CPid, ProcessModel}, From, undefined) ->
+    {noreply, {job, CPid, From, Fun, ProcessModel}, hibernate};
 
-handle_call({submit, Fun, CPid}, From, {from, CPid, MRef}) ->
+handle_call({submit, Fun, CPid, ProcessModel}, From, {from, CPid, MRef}) ->
     erlang:demonitor(MRef),
-    gen_server2:reply(From, run(Fun)),
+    gen_server2:reply(From, run(Fun, ProcessModel)),
     ok = worker_pool:idle(self()),
     {noreply, undefined, hibernate};
 
@@ -97,8 +114,8 @@ handle_cast({next_job_from, CPid}, undefined) ->
     MRef = erlang:monitor(process, CPid),
     {noreply, {from, CPid, MRef}, hibernate};
 
-handle_cast({next_job_from, CPid}, {job, CPid, From, Fun}) ->
-    gen_server2:reply(From, run(Fun)),
+handle_cast({next_job_from, CPid}, {job, CPid, From, Fun, ProcessModel}) ->
+    gen_server2:reply(From, run(Fun, ProcessModel)),
     ok = worker_pool:idle(self()),
     {noreply, undefined, hibernate};
 
index 323c04c53a375f6519c5e56368c9e94832aefe29..fe6e347c1a0f8637d0d3132b2aa9bbfb649d97ab 100644 (file)
@@ -1 +1 @@
-VERSION?=3.3.5
+VERSION?=3.5.4